From ea6cb3c1b6c8888927fe4427151e767c2a632c31 Mon Sep 17 00:00:00 2001
From: Tim Usner
Date: Thu, 25 Feb 2021 12:27:48 +0100
Subject: [PATCH 1/4] Update Go and dependencies

---
 .gitignore | 2 + api/v1alpha1/suite_test.go | 13 +- .../crd/bases/druid.gardener.cloud_etcds.yaml | 44 +- config/rbac/leader_election_role.yaml | 9 +- controllers/controller_ref_manager.go | 15 +- controllers/etcd_controller.go | 3 +- controllers/suite_test.go | 39 +- go.mod | 33 +- go.sum | 338 +- main.go | 2 +- pkg/client/kubernetes/apply.go | 7 +- .../docker/spdystream/CONTRIBUTING.md | 13 + vendor/github.com/docker/spdystream/LICENSE | 191 + .../github.com/docker/spdystream/LICENSE.docs | 425 + .../github.com/docker/spdystream/MAINTAINERS | 28 + vendor/github.com/docker/spdystream/README.md | 77 + .../docker/spdystream/connection.go | 958 + .../github.com/docker/spdystream/handlers.go | 38 + .../github.com/docker/spdystream/priority.go | 98 + .../docker/spdystream/spdy/dictionary.go | 187 + .../github.com/docker/spdystream/spdy/read.go | 348 + .../docker/spdystream/spdy/types.go | 275 + .../docker/spdystream/spdy/write.go | 318 + vendor/github.com/docker/spdystream/stream.go | 327 + vendor/github.com/docker/spdystream/utils.go | 16 + .../github.com/evanphx/json-patch/.travis.yml | 7 +- vendor/github.com/evanphx/json-patch/LICENSE | 2 +- .../github.com/evanphx/json-patch/README.md | 11 +- vendor/github.com/evanphx/json-patch/merge.go | 13 +- vendor/github.com/evanphx/json-patch/patch.go | 28 +- .../fsnotify/fsnotify/.editorconfig | 12 + .../fsnotify/fsnotify/.gitattributes | 1 + .../fsnotify/fsnotify}/.gitignore | 0 .../github.com/fsnotify/fsnotify/.travis.yml | 36 + .../fsnotify/fsnotify}/AUTHORS | 0 .../fsnotify/fsnotify}/CHANGELOG.md | 0 .../fsnotify/fsnotify}/CONTRIBUTING.md | 0 .../fsnotify/fsnotify}/LICENSE | 2 +- .../fsnotify/fsnotify}/README.md | 71 +- .../fsnotify/fsnotify}/fen.go | 0 .../fsnotify/fsnotify}/fsnotify.go | 4 +- vendor/github.com/fsnotify/fsnotify/go.mod | 5 + vendor/github.com/fsnotify/fsnotify/go.sum | 2 + .../fsnotify/fsnotify}/inotify.go | 0 .../fsnotify/fsnotify}/inotify_poller.go | 4 +- .../fsnotify/fsnotify}/kqueue.go | 0 .../fsnotify/fsnotify}/open_mode_bsd.go | 2 +- .../fsnotify/fsnotify}/open_mode_darwin.go | 2 +- .../fsnotify/fsnotify}/windows.go | 0 .../external-dns-management/LICENSE.md | 320 + .../external-dns-management/NOTICE.md | 2 + .../pkg/apis/dns/register.go | 25 + .../pkg/apis/dns/v1alpha1/dnsannotation.go | 81 + .../pkg/apis/dns/v1alpha1/dnsentry.go | 108 + .../pkg/apis/dns/v1alpha1/dnsowner.go | 73 + .../pkg/apis/dns/v1alpha1/dnsprovider.go | 105 + .../pkg/apis/dns/v1alpha1/doc.go | 21 + .../pkg/apis/dns/v1alpha1/register.go | 75 + .../pkg/apis/dns/v1alpha1/state.go | 24 + .../dns/v1alpha1/zz_generated.deepcopy.go | 606 + .../gardener-resource-manager/LICENSE.md | 288 + .../gardener-resource-manager/NOTICE.md | 15 + .../pkg/apis/resources/register.go | 18 + .../pkg/apis/resources/v1alpha1/doc.go | 24 + .../pkg/apis/resources/v1alpha1/register.go | 51 + .../pkg/apis/resources/v1alpha1/types.go | 170 + .../v1alpha1/zz_generated.deepcopy.go | 231 + vendor/github.com/gardener/gardener/NOTICE.md | 8 +- .../gardener/hack/.ci/component_descriptor | 22 +- .../gardener/hack/.ci/{ci.go => doc.go} | 2 - .../gardener/hack/.ci/prepare_release | 2 +- .../gardener/hack/.ci/set_dependency_version | 2 +- .../gardener/pkg/apis/core/field_constants.go | 11 + .../gardener/gardener/pkg/apis/core/types.go | 30 + .../pkg/apis/core/types_backupbucket.go | 5 +-
.../pkg/apis/core/types_cloudprofile.go | 36 +- .../gardener/pkg/apis/core/types_common.go | 16 +- .../apis/core/types_controllerinstallation.go | 6 +- .../apis/core/types_controllerregistration.go | 28 +- .../gardener/pkg/apis/core/types_project.go | 29 +- .../gardener/pkg/apis/core/types_seed.go | 137 +- .../gardener/pkg/apis/core/types_shoot.go | 216 +- .../pkg/apis/core/types_shootstate.go | 14 +- .../gardener/pkg/apis/core/types_utils.go | 10 +- .../pkg/apis/core/v1alpha1/conversions.go | 410 + .../pkg/apis/core/v1alpha1/defaults.go | 361 + .../gardener/pkg/apis/core/v1alpha1/doc.go | 24 + .../pkg/apis/core/v1alpha1/generated.pb.go | 40856 ++++++++++++++++ .../pkg/apis/core/v1alpha1/generated.proto | 2344 + .../pkg/apis/core/v1alpha1/register.go | 77 + .../gardener/pkg/apis/core/v1alpha1/types.go | 21 + .../apis/core/v1alpha1/types_backupbucket.go | 91 + .../apis/core/v1alpha1/types_backupentry.go | 75 + .../apis/core/v1alpha1/types_cloudprofile.go | 226 + .../pkg/apis/core/v1alpha1/types_common.go | 141 + .../v1alpha1/types_controllerinstallation.go | 80 + .../v1alpha1/types_controllerregistration.go | 108 + .../pkg/apis/core/v1alpha1/types_plant.go | 112 + .../pkg/apis/core/v1alpha1/types_project.go | 176 + .../pkg/apis/core/v1alpha1/types_quota.go | 56 + .../apis/core/v1alpha1/types_secretbinding.go | 47 + .../pkg/apis/core/v1alpha1/types_seed.go | 307 + .../pkg/apis/core/v1alpha1/types_shoot.go | 1181 + .../apis/core/v1alpha1/types_shootstate.go | 100 + .../pkg/apis/core/v1alpha1/types_utils.go | 72 + .../core/v1alpha1/zz_generated.conversion.go | 5035 ++ .../core/v1alpha1/zz_generated.deepcopy.go | 4109 ++ .../core/v1alpha1/zz_generated.defaults.go | 138 + .../core/v1beta1/constants/types_constants.go | 116 +- .../pkg/apis/core/v1beta1/constants/utils.go | 24 +- .../pkg/apis/core/v1beta1/conversions.go | 19 +- .../pkg/apis/core/v1beta1/defaults.go | 175 +- .../pkg/apis/core/v1beta1/generated.pb.go | 15875 ++++-- .../pkg/apis/core/v1beta1/generated.proto | 523 +- .../core/v1beta1/helper/condition_builder.go | 39 +- .../pkg/apis/core/v1beta1/helper/errors.go | 108 +- .../pkg/apis/core/v1beta1/helper/helper.go | 728 +- .../gardener/pkg/apis/core/v1beta1/types.go | 21 + .../apis/core/v1beta1/types_backupbucket.go | 5 +- .../apis/core/v1beta1/types_cloudprofile.go | 34 +- .../pkg/apis/core/v1beta1/types_common.go | 74 +- .../v1beta1/types_controllerinstallation.go | 6 +- .../v1beta1/types_controllerregistration.go | 34 +- .../pkg/apis/core/v1beta1/types_project.go | 39 +- .../pkg/apis/core/v1beta1/types_seed.go | 149 +- .../pkg/apis/core/v1beta1/types_shoot.go | 277 +- .../pkg/apis/core/v1beta1/types_utils.go | 26 +- .../core/v1beta1/zz_generated.conversion.go | 982 +- .../core/v1beta1/zz_generated.deepcopy.go | 798 +- .../core/v1beta1/zz_generated.defaults.go | 37 +- .../pkg/apis/core/zz_generated.deepcopy.go | 850 +- .../pkg/apis/extensions/v1alpha1/types.go | 65 +- .../extensions/v1alpha1/types_backupbucket.go | 9 +- .../extensions/v1alpha1/types_backupentry.go | 12 +- .../apis/extensions/v1alpha1/types_cluster.go | 9 + .../v1alpha1/types_containerruntime.go | 10 +- .../extensions/v1alpha1/types_controlplane.go | 11 +- .../extensions/v1alpha1/types_defaults.go | 35 +- .../extensions/v1alpha1/types_extension.go | 8 +- .../v1alpha1/types_infrastructure.go | 9 +- .../apis/extensions/v1alpha1/types_network.go | 18 +- .../v1alpha1/types_operatingsystemconfig.go | 13 +- .../apis/extensions/v1alpha1/types_worker.go | 37 +- .../v1alpha1/zz_generated.deepcopy.go | 40 +- 
.../gardener/pkg/apis/seedmanagement/doc.go | 19 + .../apis/seedmanagement/field_constants.go | 23 + .../pkg/apis/seedmanagement/helper/helper.go | 85 + .../pkg/apis/seedmanagement/register.go | 52 + .../apis/seedmanagement/types_managedseed.go | 153 + .../seedmanagement/v1alpha1/conversions.go | 58 + .../apis/seedmanagement/v1alpha1/defaults.go | 147 + .../pkg/apis/seedmanagement/v1alpha1/doc.go | 26 + .../seedmanagement/v1alpha1/generated.pb.go | 2836 ++ .../seedmanagement/v1alpha1/generated.proto | 179 + .../apis/seedmanagement/v1alpha1/register.go | 55 + .../v1alpha1/types_managedseed.go | 180 + .../v1alpha1/zz_generated.conversion.go | 380 + .../v1alpha1/zz_generated.deepcopy.go | 302 + .../v1alpha1/zz_generated.defaults.go | 53 + .../seedmanagement/zz_generated.deepcopy.go | 300 + .../gardener/pkg/apis/settings/doc.go | 19 + .../gardener/pkg/apis/settings/register.go | 54 + .../types_cluster_openidconnect_preset.go | 69 + .../settings/types_openidconnect_preset.go | 55 + .../pkg/apis/settings/types_shared.go | 93 + .../pkg/apis/settings/v1alpha1/defaults.go | 55 + .../pkg/apis/settings/v1alpha1/doc.go | 26 + .../apis/settings/v1alpha1/generated.pb.go | 2621 + .../apis/settings/v1alpha1/generated.proto | 167 + .../pkg/apis/settings/v1alpha1/register.go | 57 + .../types_cluster_openidconnect_preset.go | 59 + .../v1alpha1/types_openidconnect_preset.go | 44 + .../apis/settings/v1alpha1/types_shared.go | 105 + .../v1alpha1/zz_generated.conversion.go | 330 + .../v1alpha1/zz_generated.deepcopy.go | 276 + .../v1alpha1/zz_generated.defaults.go | 60 + .../apis/settings/zz_generated.deepcopy.go | 276 + .../gardener/pkg/chartrenderer/default.go | 197 + .../gardener/pkg/chartrenderer/factory.go | 37 + .../gardener/pkg/chartrenderer/renderer.go | 32 + .../gardener/pkg/chartrenderer/sorter.go | 121 + .../core/clientset/versioned/clientset.go | 111 + .../client/core/clientset/versioned/doc.go | 20 + .../core/clientset/versioned/scheme/doc.go | 20 + .../clientset/versioned/scheme/register.go | 58 + .../typed/core/v1alpha1/backupbucket.go | 184 + .../typed/core/v1alpha1/backupentry.go | 195 + .../typed/core/v1alpha1/cloudprofile.go | 168 + .../core/v1alpha1/controllerinstallation.go | 184 + .../core/v1alpha1/controllerregistration.go | 168 + .../typed/core/v1alpha1/core_client.go | 144 + .../versioned/typed/core/v1alpha1/doc.go | 20 + .../core/v1alpha1/generated_expansion.go | 43 + .../versioned/typed/core/v1alpha1/plant.go | 195 + .../versioned/typed/core/v1alpha1/project.go | 184 + .../versioned/typed/core/v1alpha1/quota.go | 178 + .../typed/core/v1alpha1/secretbinding.go | 178 + .../versioned/typed/core/v1alpha1/seed.go | 184 + .../versioned/typed/core/v1alpha1/shoot.go | 195 + .../typed/core/v1alpha1/shootstate.go | 178 + .../typed/core/v1beta1/backupbucket.go | 184 + .../typed/core/v1beta1/backupentry.go | 195 + .../typed/core/v1beta1/cloudprofile.go | 168 + .../core/v1beta1/controllerinstallation.go | 184 + .../core/v1beta1/controllerregistration.go | 168 + .../typed/core/v1beta1/core_client.go | 139 + .../versioned/typed/core/v1beta1/doc.go | 20 + .../typed/core/v1beta1/generated_expansion.go | 41 + .../versioned/typed/core/v1beta1/plant.go | 195 + .../versioned/typed/core/v1beta1/project.go | 184 + .../versioned/typed/core/v1beta1/quota.go | 178 + .../typed/core/v1beta1/secretbinding.go | 178 + .../versioned/typed/core/v1beta1/seed.go | 184 + .../versioned/typed/core/v1beta1/shoot.go | 195 + .../clientset/versioned/scheme/doc.go | 20 + .../clientset/versioned/scheme/register.go | 56 + 
.../pkg/client/kubernetes/admissionplugins.go | 94 + .../gardener/pkg/client/kubernetes/applier.go | 432 + .../pkg/client/kubernetes/chartapplier.go | 124 + .../pkg/client/kubernetes/chartoptions.go | 125 + .../gardener/pkg/client/kubernetes/client.go | 357 + .../pkg/client/kubernetes/clientset.go | 176 + .../pkg/client/kubernetes/deployments.go | 64 + .../pkg/client/kubernetes/manifestoptions.go | 27 + .../gardener/pkg/client/kubernetes/options.go | 101 + .../gardener/pkg/client/kubernetes/pods.go | 171 + .../pkg/client/kubernetes/runtime_client.go | 157 + .../gardener/pkg/client/kubernetes/scaling.go | 99 + .../gardener/pkg/client/kubernetes/types.go | 183 + .../clientset/versioned/clientset.go | 97 + .../seedmanagement/clientset/versioned/doc.go | 20 + .../clientset/versioned/scheme/doc.go | 20 + .../clientset/versioned/scheme/register.go | 56 + .../typed/seedmanagement/v1alpha1/doc.go | 20 + .../v1alpha1/generated_expansion.go | 21 + .../seedmanagement/v1alpha1/managedseed.go | 195 + .../v1alpha1/seedmanagement_client.go | 89 + .../clientset/versioned/scheme/doc.go | 20 + .../clientset/versioned/scheme/register.go | 56 + .../gardener/pkg/gardenlet/apis/config/doc.go | 18 + .../gardenlet/apis/config/helper/helpers.go | 84 + .../pkg/gardenlet/apis/config/register.go | 51 + .../pkg/gardenlet/apis/config/types.go | 361 + .../apis/config/v1alpha1/defaults.go | 323 + .../pkg/gardenlet/apis/config/v1alpha1/doc.go | 20 + .../apis/config/v1alpha1/register.go | 54 + .../gardenlet/apis/config/v1alpha1/types.go | 472 + .../v1alpha1/zz_generated.conversion.go | 1127 + .../config/v1alpha1/zz_generated.deepcopy.go | 852 + .../config/v1alpha1/zz_generated.defaults.go | 93 + .../apis/config/zz_generated.deepcopy.go | 852 + .../gardener/gardener/pkg/logger/logger.go | 16 +- .../gardener/gardener/pkg/utils/checksums.go | 48 + .../gardener/pkg/utils/context/context.go | 11 + .../gardener/gardener/pkg/utils/encoding.go | 18 + .../gardener/pkg/utils/errors/multierror.go | 1 + .../pkg/utils/imagevector/imagevector.go | 11 +- .../pkg/utils/kubernetes/health/and.go | 35 + .../pkg/utils/kubernetes/health/health.go | 193 +- .../gardener/gardener/pkg/utils/labels.go | 28 + .../gardener/pkg/utils/miscellaneous.go | 35 + .../gardener/gardener/pkg/utils/object.go | 180 + .../gardener/gardener/pkg/utils/random.go | 26 +- .../gardener/pkg/utils/retry/retry.go | 50 +- .../gardener/gardener/pkg/utils/timewindow.go | 23 +- .../gardener/pkg/utils/version/version.go | 2 +- .../gardener/hvpa-controller/LICENSE.md | 296 + .../api/v1alpha1/groupversion_info.go | 68 + .../api/v1alpha1/hvpa_types.go | 352 + .../api/v1alpha1/hvpa_webhook.go | 75 + .../api/v1alpha1/zz_generated.deepcopy.go | 498 + vendor/github.com/go-logr/logr/README.md | 161 +- vendor/github.com/go-logr/logr/discard.go | 35 + vendor/github.com/go-logr/logr/go.mod | 3 + vendor/github.com/go-logr/logr/logr.go | 207 +- vendor/github.com/go-logr/zapr/README.md | 4 +- vendor/github.com/go-logr/zapr/go.mod | 10 + vendor/github.com/go-logr/zapr/zapr.go | 106 +- .../gogo/protobuf/gogoproto/Makefile | 37 + .../github.com/gogo/protobuf/gogoproto/doc.go | 169 + .../gogo/protobuf/gogoproto/gogo.pb.go | 874 + .../gogo/protobuf/gogoproto/gogo.pb.golden | 45 + .../gogo/protobuf/gogoproto/gogo.proto | 144 + .../gogo/protobuf/gogoproto/helper.go | 415 + .../github.com/gogo/protobuf/jsonpb/jsonpb.go | 1435 + .../protoc-gen-gogo/descriptor/Makefile | 36 + .../protoc-gen-gogo/descriptor/descriptor.go | 118 + .../descriptor/descriptor.pb.go | 2865 ++ 
.../descriptor/descriptor_gostring.gen.go | 752 + .../protoc-gen-gogo/descriptor/helper.go | 390 + vendor/github.com/gogo/protobuf/types/any.go | 140 + .../github.com/gogo/protobuf/types/any.pb.go | 697 + .../github.com/gogo/protobuf/types/api.pb.go | 2143 + vendor/github.com/gogo/protobuf/types/doc.go | 35 + .../gogo/protobuf/types/duration.go | 100 + .../gogo/protobuf/types/duration.pb.go | 520 + .../gogo/protobuf/types/duration_gogo.go | 100 + .../gogo/protobuf/types/empty.pb.go | 465 + .../gogo/protobuf/types/field_mask.pb.go | 741 + .../gogo/protobuf/types/protosize.go | 34 + .../gogo/protobuf/types/source_context.pb.go | 527 + .../gogo/protobuf/types/struct.pb.go | 2280 + .../gogo/protobuf/types/timestamp.go | 130 + .../gogo/protobuf/types/timestamp.pb.go | 542 + .../gogo/protobuf/types/timestamp_gogo.go | 94 + .../github.com/gogo/protobuf/types/type.pb.go | 3370 ++ .../gogo/protobuf/types/wrappers.pb.go | 2730 ++ .../gogo/protobuf/types/wrappers_gogo.go | 300 + .../golang/protobuf/proto/buffer.go | 324 + .../github.com/golang/protobuf/proto/clone.go | 253 - .../golang/protobuf/proto/decode.go | 427 - .../golang/protobuf/proto/defaults.go | 63 + .../golang/protobuf/proto/deprecated.go | 126 +- .../golang/protobuf/proto/discard.go | 356 +- .../golang/protobuf/proto/encode.go | 203 - .../github.com/golang/protobuf/proto/equal.go | 301 - .../golang/protobuf/proto/extensions.go | 771 +- .../github.com/golang/protobuf/proto/lib.go | 965 - .../golang/protobuf/proto/message_set.go | 181 - .../golang/protobuf/proto/pointer_reflect.go | 360 - .../golang/protobuf/proto/pointer_unsafe.go | 313 - .../golang/protobuf/proto/properties.go | 648 +- .../github.com/golang/protobuf/proto/proto.go | 167 + .../golang/protobuf/proto/registry.go | 323 + .../golang/protobuf/proto/table_marshal.go | 2776 -- .../golang/protobuf/proto/table_merge.go | 654 - .../golang/protobuf/proto/table_unmarshal.go | 2053 - .../github.com/golang/protobuf/proto/text.go | 843 - .../golang/protobuf/proto/text_decode.go | 801 + .../golang/protobuf/proto/text_encode.go | 560 + .../golang/protobuf/proto/text_parser.go | 880 - .../github.com/golang/protobuf/proto/wire.go | 78 + .../golang/protobuf/proto/wrappers.go | 34 + .../github.com/golang/protobuf/ptypes/any.go | 214 +- .../golang/protobuf/ptypes/any/any.pb.go | 230 +- .../golang/protobuf/ptypes/any/any.proto | 154 - .../github.com/golang/protobuf/ptypes/doc.go | 37 +- .../golang/protobuf/ptypes/duration.go | 116 +- .../protobuf/ptypes/duration/duration.pb.go | 192 +- .../protobuf/ptypes/duration/duration.proto | 117 - .../golang/protobuf/ptypes/timestamp.go | 109 +- .../protobuf/ptypes/timestamp/timestamp.pb.go | 211 +- .../protobuf/ptypes/timestamp/timestamp.proto | 135 - .../github.com/google/go-cmp/cmp/compare.go | 166 +- .../google/go-cmp/cmp/export_panic.go | 6 +- .../google/go-cmp/cmp/export_unsafe.go | 20 +- .../google/go-cmp/cmp/internal/diff/diff.go | 22 +- .../google/go-cmp/cmp/internal/value/name.go | 157 + .../cmp/internal/value/pointer_purego.go | 10 + .../cmp/internal/value/pointer_unsafe.go | 10 + .../github.com/google/go-cmp/cmp/options.go | 58 +- vendor/github.com/google/go-cmp/cmp/path.go | 78 +- vendor/github.com/google/go-cmp/cmp/report.go | 5 +- .../google/go-cmp/cmp/report_compare.go | 200 +- .../google/go-cmp/cmp/report_references.go | 264 + .../google/go-cmp/cmp/report_reflect.go | 292 +- .../google/go-cmp/cmp/report_slices.go | 135 +- .../google/go-cmp/cmp/report_text.go | 86 +- vendor/github.com/google/gofuzz/README.md | 2 +- 
vendor/github.com/google/gofuzz/fuzz.go | 35 +- .../googleapis/gnostic/OpenAPIv2/OpenAPIv2.go | 3306 +- .../gnostic/OpenAPIv2/OpenAPIv2.pb.go | 8201 ++-- .../gnostic/OpenAPIv2/OpenAPIv2.proto | 7 +- .../googleapis/gnostic/OpenAPIv2/README.md | 20 +- .../googleapis/gnostic/OpenAPIv2/document.go | 26 + .../gnostic/OpenAPIv2/openapi-2.0.json | 4 +- .../googleapis/gnostic/compiler/README.md | 3 +- .../googleapis/gnostic/compiler/context.go | 2 +- .../googleapis/gnostic/compiler/error.go | 2 +- .../gnostic/compiler/extension-handler.go | 101 - .../googleapis/gnostic/compiler/extensions.go | 85 + .../googleapis/gnostic/compiler/helpers.go | 336 +- .../googleapis/gnostic/compiler/main.go | 2 +- .../googleapis/gnostic/compiler/reader.go | 133 +- .../googleapis/gnostic/extensions/README.md | 12 +- .../gnostic/extensions/extension.pb.go | 529 +- .../gnostic/extensions/extension.proto | 29 +- .../gnostic/extensions/extensions.go | 68 +- .../googleapis/gnostic/jsonschema/README.md | 4 + .../googleapis/gnostic/jsonschema/base.go | 84 + .../googleapis/gnostic/jsonschema/display.go | 229 + .../googleapis/gnostic/jsonschema/models.go | 228 + .../gnostic/jsonschema/operations.go | 394 + .../googleapis/gnostic/jsonschema/reader.go | 442 + .../googleapis/gnostic/jsonschema/schema.json | 150 + .../googleapis/gnostic/jsonschema/writer.go | 369 + vendor/github.com/hashicorp/golang-lru/lru.go | 22 +- vendor/github.com/hpcloud/tail/.gitignore | 3 - vendor/github.com/hpcloud/tail/CHANGES.md | 63 - vendor/github.com/hpcloud/tail/Dockerfile | 19 - vendor/github.com/hpcloud/tail/Makefile | 11 - vendor/github.com/hpcloud/tail/README.md | 28 - .../github.com/imdario/mergo/.deepsource.toml | 12 + vendor/github.com/imdario/mergo/README.md | 50 +- vendor/github.com/imdario/mergo/doc.go | 141 +- vendor/github.com/imdario/mergo/go.mod | 5 + vendor/github.com/imdario/mergo/go.sum | 4 + vendor/github.com/imdario/mergo/map.go | 3 + vendor/github.com/imdario/mergo/merge.go | 141 +- vendor/github.com/imdario/mergo/mergo.go | 21 +- vendor/github.com/json-iterator/go/README.md | 36 +- vendor/github.com/json-iterator/go/any_str.go | 4 +- vendor/github.com/json-iterator/go/config.go | 4 +- .../json-iterator/go/iter_object.go | 4 +- .../json-iterator/go/reflect_extension.go | 2 +- .../json-iterator/go/reflect_map.go | 80 +- .../json-iterator/go/reflect_optional.go | 4 - .../go/reflect_struct_decoder.go | 22 +- vendor/github.com/json-iterator/go/stream.go | 5 +- .../go-windows-terminal-sequences/README.md | 1 + .../sequences.go | 3 +- vendor/github.com/nxadm/tail/.gitignore | 2 + .../{hpcloud => nxadm}/tail/.travis.yml | 12 +- vendor/github.com/nxadm/tail/CHANGES.md | 46 + vendor/github.com/nxadm/tail/Dockerfile | 19 + .../tail/LICENSE.txt => nxadm/tail/LICENSE} | 0 vendor/github.com/nxadm/tail/README.md | 36 + .../{hpcloud => nxadm}/tail/appveyor.yml | 2 +- vendor/github.com/nxadm/tail/go.mod | 9 + vendor/github.com/nxadm/tail/go.sum | 6 + .../tail/ratelimiter/Licence | 0 .../tail/ratelimiter/leakybucket.go | 0 .../tail/ratelimiter/memory.go | 10 +- .../tail/ratelimiter/storage.go | 0 .../{hpcloud => nxadm}/tail/tail.go | 82 +- .../{hpcloud => nxadm}/tail/tail_posix.go | 2 +- .../{hpcloud => nxadm}/tail/tail_windows.go | 2 +- .../{hpcloud => nxadm}/tail/util/util.go | 2 +- .../tail/watch/filechanges.go | 2 +- .../{hpcloud => nxadm}/tail/watch/inotify.go | 13 +- .../tail/watch/inotify_tracker.go | 90 +- .../{hpcloud => nxadm}/tail/watch/polling.go | 2 +- .../{hpcloud => nxadm}/tail/watch/watch.go | 0 .../tail/winfile/winfile.go | 0 
vendor/github.com/onsi/ginkgo/.travis.yml | 26 +- vendor/github.com/onsi/ginkgo/CHANGELOG.md | 92 +- vendor/github.com/onsi/ginkgo/README.md | 100 +- .../github.com/onsi/ginkgo/config/config.go | 11 +- .../onsi/ginkgo/extensions/table/table.go | 56 +- .../ginkgo/extensions/table/table_entry.go | 108 +- vendor/github.com/onsi/ginkgo/ginkgo_dsl.go | 90 +- vendor/github.com/onsi/ginkgo/go.mod | 11 + vendor/github.com/onsi/ginkgo/go.sum | 67 + .../onsi/ginkgo/internal/global/init.go | 22 + .../onsi/ginkgo/internal/remote/aggregator.go | 6 +- .../remote/output_interceptor_darwin.go | 11 + .../remote/output_interceptor_dragonfly.go | 11 + .../remote/output_interceptor_freebsd.go | 11 + .../remote/output_interceptor_linux.go | 12 + .../output_interceptor_linux_mips64le.go | 12 + .../remote/output_interceptor_netbsd.go | 11 + .../remote/output_interceptor_openbsd.go | 11 + .../remote/output_interceptor_solaris.go | 11 + .../remote/output_interceptor_unix.go | 10 +- .../remote/syscall_dup_linux_arm64.go | 11 - .../internal/remote/syscall_dup_solaris.go | 9 - .../internal/remote/syscall_dup_unix.go | 11 - .../onsi/ginkgo/internal/suite/suite.go | 57 +- .../onsi/ginkgo/reporters/default_reporter.go | 6 +- .../onsi/ginkgo/reporters/junit_reporter.go | 26 +- .../stenographer/fake_stenographer.go | 12 +- .../reporters/stenographer/stenographer.go | 12 +- .../ginkgo/reporters/teamcity_reporter.go | 36 +- vendor/github.com/onsi/ginkgo/types/types.go | 2 +- vendor/github.com/onsi/gomega/.travis.yml | 8 +- vendor/github.com/onsi/gomega/CHANGELOG.md | 60 + .../github.com/onsi/gomega/format/format.go | 8 +- .../onsi/gomega/gbytes/io_wrappers.go | 6 +- vendor/github.com/onsi/gomega/go.mod | 17 +- vendor/github.com/onsi/gomega/go.sum | 46 +- vendor/github.com/onsi/gomega/gomega_dsl.go | 83 +- .../gomega/internal/assertion/assertion.go | 10 +- .../asyncassertion/async_assertion.go | 10 +- vendor/github.com/onsi/gomega/matchers.go | 33 + .../onsi/gomega/matchers/consist_of.go | 110 +- .../matchers/contain_elements_matcher.go | 44 + .../matchers/have_http_status_matcher.go | 42 + .../gomega/matchers/match_error_matcher.go | 18 +- .../onsi/gomega/matchers/panic_matcher.go | 76 +- .../goraph/bipartitegraph/bipartitegraph.go | 26 +- .../bipartitegraph/bipartitegraphmatching.go | 37 +- .../matchers/support/goraph/edge/edge.go | 8 +- .../matchers/support/goraph/node/node.go | 3 +- .../client_golang/prometheus/counter.go | 50 +- .../client_golang/prometheus/desc.go | 1 + .../client_golang/prometheus/doc.go | 37 +- .../client_golang/prometheus/gauge.go | 2 +- .../client_golang/prometheus/go_collector.go | 2 +- .../client_golang/prometheus/histogram.go | 113 +- .../client_golang/prometheus/metric.go | 1 + .../client_golang/prometheus/observer.go | 12 + .../prometheus/process_collector_windows.go | 24 +- .../prometheus/promhttp/delegator.go | 10 +- .../client_golang/prometheus/promhttp/http.go | 82 +- .../client_golang/prometheus/registry.go | 1 + .../client_golang/prometheus/summary.go | 1 + .../client_golang/prometheus/value.go | 51 +- .../client_golang/prometheus/vec.go | 12 + .../client_golang/prometheus/wrap.go | 14 +- .../prometheus/client_model/go/metrics.pb.go | 268 +- .../prometheus/common/expfmt/encode.go | 124 +- .../prometheus/common/expfmt/expfmt.go | 11 +- .../common/expfmt/openmetrics_create.go | 527 + .../prometheus/common/expfmt/text_create.go | 3 +- .../prometheus/common/model/time.go | 4 + .../prometheus/procfs/.golangci.yml | 4 +- .../prometheus/procfs/Makefile.common | 43 +- 
.../github.com/prometheus/procfs/cpuinfo.go | 265 +- .../prometheus/procfs/cpuinfo_arm.go | 18 + .../prometheus/procfs/cpuinfo_arm64.go | 19 + .../prometheus/procfs/cpuinfo_default.go | 19 + .../prometheus/procfs/cpuinfo_mips.go | 18 + .../prometheus/procfs/cpuinfo_mips64.go | 18 + .../prometheus/procfs/cpuinfo_mips64le.go | 18 + .../prometheus/procfs/cpuinfo_mipsle.go | 18 + .../prometheus/procfs/cpuinfo_ppc64.go | 18 + .../prometheus/procfs/cpuinfo_ppc64le.go | 18 + .../prometheus/procfs/cpuinfo_s390x.go | 18 + vendor/github.com/prometheus/procfs/crypto.go | 160 +- .../prometheus/procfs/fixtures.ttar | 806 +- .../github.com/prometheus/procfs/fscache.go | 422 + vendor/github.com/prometheus/procfs/go.mod | 1 + vendor/github.com/prometheus/procfs/go.sum | 2 + .../prometheus/procfs/internal/util/parse.go | 9 + .../prometheus/procfs/kernel_random.go | 62 + .../github.com/prometheus/procfs/loadavg.go | 62 + vendor/github.com/prometheus/procfs/mdstat.go | 2 +- .../github.com/prometheus/procfs/mountinfo.go | 20 +- .../prometheus/procfs/mountstats.go | 20 +- .../prometheus/procfs/net_conntrackstat.go | 153 + .../prometheus/procfs/net_softnet.go | 107 +- .../github.com/prometheus/procfs/net_udp.go | 229 + .../github.com/prometheus/procfs/net_unix.go | 222 +- vendor/github.com/prometheus/procfs/proc.go | 21 + .../prometheus/procfs/proc_cgroup.go | 98 + .../prometheus/procfs/proc_fdinfo.go | 34 +- .../github.com/prometheus/procfs/proc_maps.go | 209 + .../prometheus/procfs/proc_smaps.go | 165 + .../prometheus/procfs/proc_status.go | 37 +- vendor/github.com/prometheus/procfs/swaps.go | 89 + .../github.com/sirupsen/logrus/.golangci.yml | 40 + vendor/github.com/sirupsen/logrus/.travis.yml | 14 +- .../github.com/sirupsen/logrus/CHANGELOG.md | 27 +- vendor/github.com/sirupsen/logrus/README.md | 44 +- .../github.com/sirupsen/logrus/appveyor.yml | 28 +- vendor/github.com/sirupsen/logrus/entry.go | 47 +- vendor/github.com/sirupsen/logrus/exported.go | 2 +- vendor/github.com/sirupsen/logrus/go.mod | 5 +- vendor/github.com/sirupsen/logrus/go.sum | 8 +- .../sirupsen/logrus/json_formatter.go | 4 + vendor/github.com/sirupsen/logrus/logger.go | 11 +- vendor/github.com/sirupsen/logrus/logrus.go | 2 +- .../sirupsen/logrus/terminal_check_bsd.go | 2 +- .../sirupsen/logrus/terminal_check_js.go | 7 + .../sirupsen/logrus/terminal_check_unix.go | 2 +- .../sirupsen/logrus/text_formatter.go | 55 +- vendor/github.com/sirupsen/logrus/writer.go | 6 + vendor/go.uber.org/atomic/CHANGELOG.md | 5 + vendor/go.uber.org/atomic/tools.go | 28 - vendor/go.uber.org/multierr/CHANGELOG.md | 6 + vendor/go.uber.org/multierr/error.go | 2 +- vendor/go.uber.org/multierr/go.mod | 2 +- vendor/go.uber.org/multierr/go.sum | 4 +- vendor/go.uber.org/multierr/tools.go | 30 - vendor/go.uber.org/tools/LICENSE | 19 - .../tools/update-license/.gitignore | 1 - .../tools/update-license/README.md | 24 - .../tools/update-license/licenses.go | 56 - .../go.uber.org/tools/update-license/main.go | 228 - vendor/go.uber.org/zap/.travis.yml | 2 +- vendor/go.uber.org/zap/CHANGELOG.md | 49 + vendor/go.uber.org/zap/Makefile | 8 +- vendor/go.uber.org/zap/buffer/buffer.go | 10 +- vendor/go.uber.org/zap/config.go | 29 +- vendor/go.uber.org/zap/encoder.go | 4 + vendor/go.uber.org/zap/field.go | 8 + vendor/go.uber.org/zap/go.mod | 5 +- vendor/go.uber.org/zap/go.sum | 8 +- vendor/go.uber.org/zap/logger.go | 6 + vendor/go.uber.org/zap/options.go | 30 +- vendor/go.uber.org/zap/tools.go | 28 - vendor/go.uber.org/zap/zapcore/encoder.go | 36 +- 
vendor/go.uber.org/zap/zapcore/error.go | 5 - vendor/go.uber.org/zap/zapcore/field.go | 7 +- .../go.uber.org/zap/zapcore/increase_level.go | 66 + .../go.uber.org/zap/zapcore/json_encoder.go | 7 + vendor/go.uber.org/zap/zapcore/sampler.go | 94 +- .../x/crypto/ssh/terminal/terminal.go | 8 + vendor/golang.org/x/lint/.travis.yml | 19 - vendor/golang.org/x/lint/CONTRIBUTING.md | 15 - vendor/golang.org/x/lint/README.md | 88 - vendor/golang.org/x/lint/go.mod | 5 - vendor/golang.org/x/lint/go.sum | 8 - vendor/golang.org/x/lint/golint/golint.go | 159 - vendor/golang.org/x/lint/golint/import.go | 309 - .../golang.org/x/lint/golint/importcomment.go | 13 - vendor/golang.org/x/lint/lint.go | 1614 - vendor/golang.org/x/net/html/const.go | 2 +- vendor/golang.org/x/net/html/foreign.go | 119 +- vendor/golang.org/x/net/html/parse.go | 15 +- vendor/golang.org/x/net/html/render.go | 2 +- .../x/net/http2/client_conn_pool.go | 8 +- vendor/golang.org/x/net/http2/flow.go | 2 + .../golang.org/x/net/http2/hpack/huffman.go | 7 + vendor/golang.org/x/net/http2/http2.go | 7 + vendor/golang.org/x/net/http2/server.go | 20 +- vendor/golang.org/x/net/http2/transport.go | 137 +- .../idna/{tables12.00.go => tables12.0.0.go} | 2 +- vendor/golang.org/x/net/idna/tables13.0.0.go | 4839 ++ .../sys/internal/unsafeheader/unsafeheader.go | 30 + vendor/golang.org/x/sys/unix/README.md | 4 +- .../x/sys/unix/asm_openbsd_mips64.s | 29 + vendor/golang.org/x/sys/unix/fcntl_darwin.go | 6 + .../x/sys/unix/fcntl_linux_32bit.go | 4 +- vendor/golang.org/x/sys/unix/gccgo.go | 2 - vendor/golang.org/x/sys/unix/gccgo_c.c | 6 + vendor/golang.org/x/sys/unix/ioctl.go | 9 + vendor/golang.org/x/sys/unix/mkall.sh | 26 +- vendor/golang.org/x/sys/unix/mkerrors.sh | 30 +- .../x/sys/unix/sockcmsg_unix_other.go | 6 +- vendor/golang.org/x/sys/unix/syscall.go | 43 +- vendor/golang.org/x/sys/unix/syscall_aix.go | 16 + vendor/golang.org/x/sys/unix/syscall_bsd.go | 36 +- .../x/sys/unix/syscall_darwin.1_12.go | 4 +- .../x/sys/unix/syscall_darwin.1_13.go | 21 +- .../golang.org/x/sys/unix/syscall_darwin.go | 142 +- .../x/sys/unix/syscall_darwin_386.go | 22 +- .../x/sys/unix/syscall_darwin_amd64.go | 22 +- .../x/sys/unix/syscall_darwin_arm.go | 19 +- .../x/sys/unix/syscall_darwin_arm64.go | 24 +- .../x/sys/unix/syscall_dragonfly.go | 19 +- .../golang.org/x/sys/unix/syscall_freebsd.go | 19 +- .../golang.org/x/sys/unix/syscall_illumos.go | 90 + vendor/golang.org/x/sys/unix/syscall_linux.go | 183 +- .../x/sys/unix/syscall_linux_386.go | 5 +- .../x/sys/unix/syscall_linux_amd64.go | 2 +- .../x/sys/unix/syscall_linux_arm.go | 7 +- .../x/sys/unix/syscall_linux_arm64.go | 28 +- .../x/sys/unix/syscall_linux_gc_arm.go | 13 + .../x/sys/unix/syscall_linux_mips64x.go | 2 +- .../x/sys/unix/syscall_linux_mipsx.go | 2 +- .../x/sys/unix/syscall_linux_ppc64x.go | 2 +- .../x/sys/unix/syscall_linux_riscv64.go | 8 +- .../x/sys/unix/syscall_linux_s390x.go | 2 +- .../x/sys/unix/syscall_linux_sparc64.go | 2 +- .../golang.org/x/sys/unix/syscall_netbsd.go | 19 +- .../golang.org/x/sys/unix/syscall_openbsd.go | 19 +- .../x/sys/unix/syscall_openbsd_mips64.go | 35 + .../golang.org/x/sys/unix/syscall_solaris.go | 7 +- vendor/golang.org/x/sys/unix/syscall_unix.go | 19 +- .../x/sys/unix/zerrors_darwin_386.go | 4 + .../x/sys/unix/zerrors_darwin_amd64.go | 4 + .../x/sys/unix/zerrors_darwin_arm.go | 4 + .../x/sys/unix/zerrors_darwin_arm64.go | 4 + .../x/sys/unix/zerrors_dragonfly_amd64.go | 138 +- .../x/sys/unix/zerrors_freebsd_386.go | 6 + .../x/sys/unix/zerrors_freebsd_amd64.go | 6 + 
.../x/sys/unix/zerrors_freebsd_arm.go | 6 + .../x/sys/unix/zerrors_freebsd_arm64.go | 6 + vendor/golang.org/x/sys/unix/zerrors_linux.go | 327 +- .../x/sys/unix/zerrors_linux_386.go | 8 + .../x/sys/unix/zerrors_linux_amd64.go | 8 + .../x/sys/unix/zerrors_linux_arm.go | 8 + .../x/sys/unix/zerrors_linux_arm64.go | 9 + .../x/sys/unix/zerrors_linux_mips.go | 8 + .../x/sys/unix/zerrors_linux_mips64.go | 8 + .../x/sys/unix/zerrors_linux_mips64le.go | 8 + .../x/sys/unix/zerrors_linux_mipsle.go | 8 + .../x/sys/unix/zerrors_linux_ppc64.go | 8 + .../x/sys/unix/zerrors_linux_ppc64le.go | 8 + .../x/sys/unix/zerrors_linux_riscv64.go | 8 + .../x/sys/unix/zerrors_linux_s390x.go | 8 + .../x/sys/unix/zerrors_linux_sparc64.go | 8 + .../x/sys/unix/zerrors_netbsd_386.go | 6 + .../x/sys/unix/zerrors_netbsd_amd64.go | 6 + .../x/sys/unix/zerrors_netbsd_arm.go | 6 + .../x/sys/unix/zerrors_netbsd_arm64.go | 6 + .../x/sys/unix/zerrors_openbsd_386.go | 7 + .../x/sys/unix/zerrors_openbsd_amd64.go | 7 + .../x/sys/unix/zerrors_openbsd_arm.go | 7 + .../x/sys/unix/zerrors_openbsd_arm64.go | 7 + .../x/sys/unix/zerrors_openbsd_mips64.go | 1862 + .../x/sys/unix/zerrors_solaris_amd64.go | 22 +- .../x/sys/unix/zsyscall_darwin_386.1_11.go | 1811 - .../x/sys/unix/zsyscall_darwin_386.go | 169 +- .../x/sys/unix/zsyscall_darwin_386.s | 18 +- .../x/sys/unix/zsyscall_darwin_amd64.1_11.go | 1811 - .../x/sys/unix/zsyscall_darwin_amd64.go | 169 +- .../x/sys/unix/zsyscall_darwin_amd64.s | 18 +- .../x/sys/unix/zsyscall_darwin_arm.1_11.go | 1784 - .../x/sys/unix/zsyscall_darwin_arm.go | 139 +- .../x/sys/unix/zsyscall_darwin_arm.s | 14 +- .../x/sys/unix/zsyscall_darwin_arm64.go | 154 +- .../x/sys/unix/zsyscall_darwin_arm64.s | 16 +- .../x/sys/unix/zsyscall_dragonfly_amd64.go | 32 +- .../x/sys/unix/zsyscall_illumos_amd64.go | 114 + .../golang.org/x/sys/unix/zsyscall_linux.go | 108 + .../x/sys/unix/zsyscall_linux_386.go | 2 +- .../x/sys/unix/zsyscall_linux_amd64.go | 2 +- .../x/sys/unix/zsyscall_linux_arm.go | 2 +- .../x/sys/unix/zsyscall_linux_arm64.go | 4 +- .../x/sys/unix/zsyscall_linux_mips.go | 2 +- .../x/sys/unix/zsyscall_linux_mips64.go | 2 +- .../x/sys/unix/zsyscall_linux_mips64le.go | 2 +- .../x/sys/unix/zsyscall_linux_mipsle.go | 2 +- .../x/sys/unix/zsyscall_linux_ppc64.go | 2 +- .../x/sys/unix/zsyscall_linux_ppc64le.go | 2 +- .../x/sys/unix/zsyscall_linux_s390x.go | 2 +- .../x/sys/unix/zsyscall_linux_sparc64.go | 2 +- ...m64.1_11.go => zsyscall_openbsd_mips64.go} | 514 +- .../x/sys/unix/zsysctl_openbsd_386.go | 3 +- .../x/sys/unix/zsysctl_openbsd_amd64.go | 1 + .../x/sys/unix/zsysctl_openbsd_arm.go | 1 + .../x/sys/unix/zsysctl_openbsd_mips64.go | 279 + .../x/sys/unix/zsysnum_darwin_386.go | 1 + .../x/sys/unix/zsysnum_darwin_amd64.go | 1 + .../x/sys/unix/zsysnum_darwin_arm.go | 1 + .../x/sys/unix/zsysnum_darwin_arm64.go | 1 + .../x/sys/unix/zsysnum_dragonfly_amd64.go | 255 +- .../x/sys/unix/zsysnum_linux_386.go | 4 + .../x/sys/unix/zsysnum_linux_amd64.go | 4 + .../x/sys/unix/zsysnum_linux_arm.go | 4 + .../x/sys/unix/zsysnum_linux_arm64.go | 4 + .../x/sys/unix/zsysnum_linux_mips.go | 4 + .../x/sys/unix/zsysnum_linux_mips64.go | 4 + .../x/sys/unix/zsysnum_linux_mips64le.go | 4 + .../x/sys/unix/zsysnum_linux_mipsle.go | 4 + .../x/sys/unix/zsysnum_linux_ppc64.go | 4 + .../x/sys/unix/zsysnum_linux_ppc64le.go | 4 + .../x/sys/unix/zsysnum_linux_riscv64.go | 4 + .../x/sys/unix/zsysnum_linux_s390x.go | 4 + .../x/sys/unix/zsysnum_linux_sparc64.go | 4 + .../x/sys/unix/zsysnum_openbsd_mips64.go | 220 + .../x/sys/unix/ztypes_darwin_386.go | 32 
+- .../x/sys/unix/ztypes_darwin_amd64.go | 43 +- .../x/sys/unix/ztypes_darwin_arm.go | 39 +- .../x/sys/unix/ztypes_darwin_arm64.go | 43 +- .../x/sys/unix/ztypes_dragonfly_amd64.go | 46 +- .../x/sys/unix/ztypes_freebsd_arm.go | 12 +- vendor/golang.org/x/sys/unix/ztypes_linux.go | 1570 +- .../golang.org/x/sys/unix/ztypes_linux_386.go | 21 + .../x/sys/unix/ztypes_linux_amd64.go | 24 + .../golang.org/x/sys/unix/ztypes_linux_arm.go | 24 + .../x/sys/unix/ztypes_linux_arm64.go | 24 + .../x/sys/unix/ztypes_linux_mips.go | 24 + .../x/sys/unix/ztypes_linux_mips64.go | 24 + .../x/sys/unix/ztypes_linux_mips64le.go | 24 + .../x/sys/unix/ztypes_linux_mipsle.go | 24 + .../x/sys/unix/ztypes_linux_ppc64.go | 24 + .../x/sys/unix/ztypes_linux_ppc64le.go | 24 + .../x/sys/unix/ztypes_linux_riscv64.go | 24 + .../x/sys/unix/ztypes_linux_s390x.go | 24 + .../x/sys/unix/ztypes_linux_sparc64.go | 24 + .../x/sys/unix/ztypes_openbsd_mips64.go | 565 + .../x/sys/unix/ztypes_solaris_amd64.go | 31 +- .../golang.org/x/sys/windows/dll_windows.go | 29 + .../golang.org/x/sys/windows/env_windows.go | 11 +- .../x/sys/windows/memory_windows.go | 25 +- .../x/sys/windows/security_windows.go | 20 +- vendor/golang.org/x/sys/windows/service.go | 2 + .../x/sys/windows/setupapierrors_windows.go | 100 + vendor/golang.org/x/sys/windows/syscall.go | 46 +- .../x/sys/windows/syscall_windows.go | 70 +- .../golang.org/x/sys/windows/types_windows.go | 12 - .../x/sys/windows/types_windows_386.go | 13 + .../x/sys/windows/types_windows_amd64.go | 12 + .../x/sys/windows/types_windows_arm.go | 13 + .../x/sys/windows/zsyscall_windows.go | 4364 +- .../x/text/encoding/unicode/unicode.go | 94 +- .../golang.org/x/text/transform/transform.go | 6 +- vendor/golang.org/x/text/unicode/bidi/core.go | 8 +- .../x/text/unicode/bidi/tables11.0.0.go | 2 +- .../x/text/unicode/bidi/tables12.0.0.go | 1923 + .../x/text/unicode/norm/tables11.0.0.go | 2 +- .../x/text/unicode/norm/tables12.0.0.go | 7710 +++ vendor/golang.org/x/time/rate/rate.go | 12 +- vendor/golang.org/x/tools/LICENSE | 27 - .../x/tools/go/analysis/analysis.go | 237 - .../x/tools/go/analysis/diagnostic.go | 61 - vendor/golang.org/x/tools/go/analysis/doc.go | 301 - .../go/analysis/passes/inspect/inspect.go | 49 - .../x/tools/go/analysis/validate.go | 97 - .../x/tools/go/ast/astutil/enclosing.go | 627 - .../x/tools/go/ast/astutil/imports.go | 482 - .../x/tools/go/ast/astutil/rewrite.go | 477 - .../golang.org/x/tools/go/ast/astutil/util.go | 14 - .../x/tools/go/ast/inspector/inspector.go | 182 - .../x/tools/go/ast/inspector/typeof.go | 216 - .../x/tools/go/buildutil/allpackages.go | 198 - .../x/tools/go/buildutil/fakecontext.go | 109 - .../x/tools/go/buildutil/overlay.go | 103 - .../golang.org/x/tools/go/buildutil/tags.go | 75 - .../golang.org/x/tools/go/buildutil/util.go | 212 - .../x/tools/go/gcexportdata/gcexportdata.go | 109 - .../x/tools/go/gcexportdata/importer.go | 73 - .../x/tools/go/internal/gcimporter/bexport.go | 852 - .../x/tools/go/internal/gcimporter/bimport.go | 1039 - .../go/internal/gcimporter/exportdata.go | 93 - .../go/internal/gcimporter/gcimporter.go | 1078 - .../x/tools/go/internal/gcimporter/iexport.go | 739 - .../x/tools/go/internal/gcimporter/iimport.go | 630 - .../go/internal/gcimporter/newInterface10.go | 21 - .../go/internal/gcimporter/newInterface11.go | 13 - .../tools/go/internal/packagesdriver/sizes.go | 117 - vendor/golang.org/x/tools/go/packages/doc.go | 221 - .../x/tools/go/packages/external.go | 101 - .../golang.org/x/tools/go/packages/golist.go | 889 - 
.../x/tools/go/packages/golist_overlay.go | 386 - .../x/tools/go/packages/loadmode_string.go | 57 - .../x/tools/go/packages/packages.go | 1159 - .../golang.org/x/tools/go/packages/visit.go | 55 - .../x/tools/go/types/objectpath/objectpath.go | 523 - .../x/tools/go/types/typeutil/callee.go | 46 - .../x/tools/go/types/typeutil/imports.go | 31 - .../x/tools/go/types/typeutil/map.go | 313 - .../tools/go/types/typeutil/methodsetcache.go | 72 - .../x/tools/go/types/typeutil/ui.go | 52 - .../internal/analysisinternal/analysis.go | 118 - .../x/tools/internal/gocommand/invoke.go | 186 - .../internal/packagesinternal/packages.go | 35 - .../x/tools/internal/telemetry/event/event.go | 108 - .../tools/internal/telemetry/event/export.go | 68 - .../x/tools/internal/telemetry/event/key.go | 499 - .../x/tools/internal/telemetry/event/label.go | 29 - .../x/tools/internal/telemetry/event/log.go | 59 - .../tools/internal/telemetry/event/metric.go | 29 - .../x/tools/internal/telemetry/event/tag.go | 256 - .../x/tools/internal/telemetry/event/trace.go | 42 - vendor/golang.org/x/xerrors/LICENSE | 27 - vendor/golang.org/x/xerrors/PATENTS | 22 - vendor/golang.org/x/xerrors/README | 2 - vendor/golang.org/x/xerrors/adaptor.go | 193 - vendor/golang.org/x/xerrors/codereview.cfg | 1 - vendor/golang.org/x/xerrors/doc.go | 22 - vendor/golang.org/x/xerrors/errors.go | 33 - vendor/golang.org/x/xerrors/fmt.go | 187 - vendor/golang.org/x/xerrors/format.go | 34 - vendor/golang.org/x/xerrors/frame.go | 56 - vendor/golang.org/x/xerrors/go.mod | 3 - vendor/golang.org/x/xerrors/wrap.go | 106 - .../gomodules.xyz/jsonpatch/v2/jsonpatch.go | 24 +- .../google.golang.org/appengine/.travis.yml | 2 - .../appengine/internal/api.go | 7 +- .../protobuf}/AUTHORS | 2 +- .../protobuf}/CONTRIBUTORS | 2 +- .../protobuf}/LICENSE | 2 +- .../protobuf}/PATENTS | 0 .../protobuf/encoding/prototext/decode.go | 796 + .../protobuf/encoding/prototext/doc.go} | 8 +- .../protobuf/encoding/prototext/encode.go | 433 + .../protobuf/encoding/protowire/wire.go | 538 + .../protobuf/internal/descfmt/stringer.go | 316 + .../protobuf/internal/descopts/options.go | 29 + .../protobuf/internal/detrand/rand.go | 61 + .../internal/encoding/defval/default.go | 213 + .../encoding/messageset/messageset.go | 258 + .../protobuf/internal/encoding/tag/tag.go | 207 + .../protobuf/internal/encoding/text/decode.go | 665 + .../internal/encoding/text/decode_number.go | 190 + .../internal/encoding/text/decode_string.go | 161 + .../internal/encoding/text/decode_token.go | 373 + .../protobuf/internal/encoding/text/doc.go | 29 + .../protobuf/internal/encoding/text/encode.go | 267 + .../protobuf/internal/errors/errors.go | 89 + .../protobuf/internal/errors/is_go112.go | 39 + .../protobuf/internal/errors/is_go113.go | 12 + .../protobuf/internal/fieldnum/any_gen.go | 13 + .../protobuf/internal/fieldnum/api_gen.go | 35 + .../internal/fieldnum/descriptor_gen.go | 240 + .../protobuf/internal/fieldnum/doc.go} | 8 +- .../internal/fieldnum/duration_gen.go | 13 + .../protobuf/internal/fieldnum/empty_gen.go} | 9 +- .../internal/fieldnum/field_mask_gen.go | 12 + .../internal/fieldnum/source_context_gen.go | 12 + .../protobuf/internal/fieldnum/struct_gen.go | 33 + .../internal/fieldnum/timestamp_gen.go | 13 + .../protobuf/internal/fieldnum/type_gen.go | 53 + .../internal/fieldnum/wrappers_gen.go | 52 + .../protobuf/internal/fieldsort/fieldsort.go | 40 + .../protobuf/internal/filedesc/build.go | 155 + .../protobuf/internal/filedesc/desc.go | 613 + .../protobuf/internal/filedesc/desc_init.go | 471 + 
.../protobuf/internal/filedesc/desc_lazy.go | 704 + .../protobuf/internal/filedesc/desc_list.go | 286 + .../internal/filedesc/desc_list_gen.go | 345 + .../protobuf/internal/filedesc/placeholder.go | 107 + .../protobuf/internal/filetype/build.go | 297 + .../protobuf/internal/flags/flags.go | 24 + .../internal/flags/proto_legacy_disable.go} | 7 +- .../internal/flags/proto_legacy_enable.go | 9 + .../protobuf/internal/genname/name.go | 25 + .../protobuf/internal/impl/api_export.go | 170 + .../protobuf/internal/impl/checkinit.go | 141 + .../protobuf/internal/impl/codec_extension.go | 223 + .../protobuf/internal/impl/codec_field.go | 828 + .../protobuf/internal/impl/codec_gen.go | 5637 +++ .../protobuf/internal/impl/codec_map.go | 388 + .../protobuf/internal/impl/codec_map_go111.go | 37 + .../internal/impl/codec_map_go112.go} | 10 +- .../protobuf/internal/impl/codec_message.go | 159 + .../internal/impl/codec_messageset.go | 120 + .../protobuf/internal/impl/codec_reflect.go | 209 + .../protobuf/internal/impl/codec_tables.go | 557 + .../protobuf/internal/impl/codec_unsafe.go | 17 + .../protobuf/internal/impl/convert.go | 467 + .../protobuf/internal/impl/convert_list.go | 141 + .../protobuf/internal/impl/convert_map.go | 121 + .../protobuf/internal/impl/decode.go | 274 + .../protobuf/internal/impl/encode.go | 199 + .../protobuf/internal/impl/enum.go | 21 + .../protobuf/internal/impl/extension.go | 156 + .../protobuf/internal/impl/legacy_enum.go | 219 + .../protobuf/internal/impl/legacy_export.go | 92 + .../internal/impl/legacy_extension.go | 175 + .../protobuf/internal/impl/legacy_file.go | 81 + .../protobuf/internal/impl/legacy_message.go | 502 + .../protobuf/internal/impl/merge.go | 176 + .../protobuf/internal/impl/merge_gen.go | 209 + .../protobuf/internal/impl/message.go | 215 + .../protobuf/internal/impl/message_reflect.go | 364 + .../internal/impl/message_reflect_field.go | 466 + .../internal/impl/message_reflect_gen.go | 249 + .../protobuf/internal/impl/pointer_reflect.go | 177 + .../protobuf/internal/impl/pointer_unsafe.go | 173 + .../protobuf/internal/impl/validate.go | 575 + .../protobuf/internal/impl/weak.go | 74 + .../protobuf/internal/mapsort/mapsort.go | 43 + .../protobuf/internal/pragma/pragma.go | 29 + .../protobuf/internal/set/ints.go | 58 + .../protobuf/internal/strs/strings.go | 196 + .../protobuf/internal/strs/strings_pure.go | 27 + .../protobuf/internal/strs/strings_unsafe.go | 94 + .../protobuf/internal/version/version.go | 79 + .../protobuf/proto/checkinit.go | 71 + .../protobuf/proto/decode.go | 273 + .../protobuf/proto/decode_gen.go | 603 + .../google.golang.org/protobuf/proto/doc.go | 94 + .../protobuf/proto/encode.go | 346 + .../protobuf/proto/encode_gen.go | 97 + .../google.golang.org/protobuf/proto/equal.go | 154 + .../protobuf/proto/extension.go | 92 + .../google.golang.org/protobuf/proto/merge.go | 139 + .../protobuf/proto/messageset.go | 88 + .../google.golang.org/protobuf/proto/proto.go | 34 + .../protobuf/proto/proto_methods.go | 19 + .../protobuf/proto/proto_reflect.go | 19 + .../google.golang.org/protobuf/proto/reset.go | 43 + .../google.golang.org/protobuf/proto/size.go | 97 + .../protobuf/proto/size_gen.go | 55 + .../protobuf/proto/wrappers.go | 29 + .../protobuf/reflect/protoreflect/methods.go | 77 + .../protobuf/reflect/protoreflect/proto.go | 478 + .../protobuf/reflect/protoreflect/source.go | 52 + .../protobuf/reflect/protoreflect/type.go | 631 + .../protobuf/reflect/protoreflect/value.go | 285 + .../reflect/protoreflect/value_pure.go | 59 + 
.../reflect/protoreflect/value_union.go | 411 + .../reflect/protoreflect/value_unsafe.go | 98 + .../reflect/protoregistry/registry.go | 800 + .../protobuf/runtime/protoiface/legacy.go | 15 + .../protobuf/runtime/protoiface/methods.go | 167 + .../protobuf/runtime/protoimpl/impl.go | 44 + .../protobuf/runtime/protoimpl/version.go | 56 + .../protobuf/types/known/anypb/any.pb.go | 287 + .../types/known/durationpb/duration.pb.go | 249 + .../types/known/timestamppb/timestamp.pb.go | 271 + vendor/gopkg.in/fsnotify.v1/.editorconfig | 5 - vendor/gopkg.in/fsnotify.v1/.travis.yml | 30 - vendor/gopkg.in/yaml.v2/apic.go | 1 + vendor/gopkg.in/yaml.v3/.travis.yml | 17 + vendor/gopkg.in/yaml.v3/LICENSE | 50 + vendor/gopkg.in/yaml.v3/NOTICE | 13 + vendor/gopkg.in/yaml.v3/README.md | 150 + vendor/gopkg.in/yaml.v3/apic.go | 747 + vendor/gopkg.in/yaml.v3/decode.go | 948 + vendor/gopkg.in/yaml.v3/emitterc.go | 2022 + vendor/gopkg.in/yaml.v3/encode.go | 572 + vendor/gopkg.in/yaml.v3/go.mod | 5 + vendor/gopkg.in/yaml.v3/parserc.go | 1249 + vendor/gopkg.in/yaml.v3/readerc.go | 434 + vendor/gopkg.in/yaml.v3/resolve.go | 326 + vendor/gopkg.in/yaml.v3/scannerc.go | 3028 ++ vendor/gopkg.in/yaml.v3/sorter.go | 134 + vendor/gopkg.in/yaml.v3/writerc.go | 48 + vendor/gopkg.in/yaml.v3/yaml.go | 693 + vendor/gopkg.in/yaml.v3/yamlh.go | 807 + vendor/gopkg.in/yaml.v3/yamlprivateh.go | 198 + vendor/honnef.co/go/tools/LICENSE | 20 - vendor/honnef.co/go/tools/LICENSE-THIRD-PARTY | 226 - vendor/honnef.co/go/tools/arg/arg.go | 48 - .../go/tools/cmd/staticcheck/README.md | 15 - .../go/tools/cmd/staticcheck/staticcheck.go | 44 - vendor/honnef.co/go/tools/config/config.go | 224 - vendor/honnef.co/go/tools/config/example.conf | 10 - .../honnef.co/go/tools/deprecated/stdlib.go | 112 - vendor/honnef.co/go/tools/facts/deprecated.go | 144 - vendor/honnef.co/go/tools/facts/generated.go | 86 - vendor/honnef.co/go/tools/facts/purity.go | 175 - vendor/honnef.co/go/tools/facts/token.go | 24 - vendor/honnef.co/go/tools/functions/loops.go | 54 - vendor/honnef.co/go/tools/functions/pure.go | 46 - .../go/tools/functions/terminates.go | 24 - .../go/tools/go/types/typeutil/callee.go | 46 - .../go/tools/go/types/typeutil/identical.go | 75 - .../go/tools/go/types/typeutil/imports.go | 31 - .../go/tools/go/types/typeutil/map.go | 319 - .../tools/go/types/typeutil/methodsetcache.go | 72 - .../go/tools/go/types/typeutil/ui.go | 52 - .../go/tools/internal/cache/cache.go | 474 - .../go/tools/internal/cache/default.go | 85 - .../honnef.co/go/tools/internal/cache/hash.go | 176 - .../internal/passes/buildssa/buildssa.go | 116 - .../go/tools/internal/renameio/renameio.go | 83 - .../go/tools/internal/sharedcheck/lint.go | 70 - vendor/honnef.co/go/tools/lint/LICENSE | 28 - vendor/honnef.co/go/tools/lint/lint.go | 491 - .../go/tools/lint/lintdsl/lintdsl.go | 400 - .../go/tools/lint/lintutil/format/format.go | 135 - .../honnef.co/go/tools/lint/lintutil/stats.go | 7 - .../go/tools/lint/lintutil/stats_bsd.go | 10 - .../go/tools/lint/lintutil/stats_posix.go | 10 - .../honnef.co/go/tools/lint/lintutil/util.go | 392 - vendor/honnef.co/go/tools/lint/runner.go | 970 - vendor/honnef.co/go/tools/lint/stats.go | 20 - vendor/honnef.co/go/tools/loader/loader.go | 197 - vendor/honnef.co/go/tools/printf/fuzz.go | 11 - vendor/honnef.co/go/tools/printf/printf.go | 197 - .../honnef.co/go/tools/simple/CONTRIBUTING.md | 15 - vendor/honnef.co/go/tools/simple/analysis.go | 223 - vendor/honnef.co/go/tools/simple/doc.go | 425 - vendor/honnef.co/go/tools/simple/lint.go | 1816 - 
vendor/honnef.co/go/tools/ssa/LICENSE | 28 - vendor/honnef.co/go/tools/ssa/blockopt.go | 195 - vendor/honnef.co/go/tools/ssa/builder.go | 2379 - vendor/honnef.co/go/tools/ssa/const.go | 169 - vendor/honnef.co/go/tools/ssa/create.go | 270 - vendor/honnef.co/go/tools/ssa/doc.go | 125 - vendor/honnef.co/go/tools/ssa/dom.go | 343 - vendor/honnef.co/go/tools/ssa/emit.go | 469 - vendor/honnef.co/go/tools/ssa/func.go | 765 - vendor/honnef.co/go/tools/ssa/identical.go | 7 - vendor/honnef.co/go/tools/ssa/identical_17.go | 7 - vendor/honnef.co/go/tools/ssa/lift.go | 657 - vendor/honnef.co/go/tools/ssa/lvalue.go | 123 - vendor/honnef.co/go/tools/ssa/methods.go | 239 - vendor/honnef.co/go/tools/ssa/mode.go | 100 - vendor/honnef.co/go/tools/ssa/print.go | 435 - vendor/honnef.co/go/tools/ssa/sanity.go | 535 - vendor/honnef.co/go/tools/ssa/source.go | 293 - vendor/honnef.co/go/tools/ssa/ssa.go | 1745 - .../honnef.co/go/tools/ssa/staticcheck.conf | 3 - vendor/honnef.co/go/tools/ssa/testmain.go | 271 - vendor/honnef.co/go/tools/ssa/util.go | 119 - vendor/honnef.co/go/tools/ssa/wrappers.go | 290 - vendor/honnef.co/go/tools/ssa/write.go | 5 - vendor/honnef.co/go/tools/ssautil/ssautil.go | 58 - .../go/tools/staticcheck/CONTRIBUTING.md | 15 - .../go/tools/staticcheck/analysis.go | 525 - .../go/tools/staticcheck/buildtag.go | 21 - vendor/honnef.co/go/tools/staticcheck/doc.go | 764 - .../go/tools/staticcheck/knowledge.go | 25 - vendor/honnef.co/go/tools/staticcheck/lint.go | 3360 -- .../honnef.co/go/tools/staticcheck/rules.go | 321 - .../go/tools/staticcheck/structtag.go | 58 - .../go/tools/staticcheck/vrp/channel.go | 73 - .../honnef.co/go/tools/staticcheck/vrp/int.go | 476 - .../go/tools/staticcheck/vrp/slice.go | 273 - .../go/tools/staticcheck/vrp/string.go | 258 - .../honnef.co/go/tools/staticcheck/vrp/vrp.go | 1056 - .../honnef.co/go/tools/stylecheck/analysis.go | 111 - vendor/honnef.co/go/tools/stylecheck/doc.go | 154 - vendor/honnef.co/go/tools/stylecheck/lint.go | 629 - vendor/honnef.co/go/tools/stylecheck/names.go | 264 - vendor/honnef.co/go/tools/unused/edge.go | 54 - .../go/tools/unused/edgekind_string.go | 109 - .../honnef.co/go/tools/unused/implements.go | 82 - vendor/honnef.co/go/tools/unused/unused.go | 1964 - .../honnef.co/go/tools/version/buildinfo.go | 46 - .../go/tools/version/buildinfo111.go | 6 - vendor/honnef.co/go/tools/version/version.go | 42 - vendor/istio.io/api/LICENSE | 202 + .../api/analysis/v1alpha1/message.gen.json | 130 + .../api/analysis/v1alpha1/message.pb.go | 1938 + .../api/analysis/v1alpha1/message.proto | 144 + .../analysis/v1alpha1/message_deepcopy.gen.go | 144 + .../api/analysis/v1alpha1/message_json.gen.go | 91 + .../api/meta/v1alpha1/status.gen.json | 111 + .../istio.io/api/meta/v1alpha1/status.pb.go | 961 + .../istio.io/api/meta/v1alpha1/status.proto | 74 + .../api/meta/v1alpha1/status_deepcopy.gen.go | 60 + .../api/meta/v1alpha1/status_json.gen.go | 47 + .../v1beta1/destination_rule.gen.json | 558 + .../networking/v1beta1/destination_rule.pb.go | 7062 +++ .../networking/v1beta1/destination_rule.proto | 1048 + .../v1beta1/destination_rule_deepcopy.gen.go | 494 + .../v1beta1/destination_rule_json.gen.go | 341 + .../api/networking/v1beta1/gateway.gen.json | 183 + .../api/networking/v1beta1/gateway.pb.go | 2734 ++ .../api/networking/v1beta1/gateway.proto | 733 + .../v1beta1/gateway_deepcopy.gen.go | 430 + .../networking/v1beta1/gateway_json.gen.go | 397 + .../networking/v1beta1/service_entry.gen.json | 172 + .../networking/v1beta1/service_entry.pb.go | 1745 + 
.../networking/v1beta1/service_entry.proto | 953 + .../v1beta1/service_entry_deepcopy.gen.go | 784 + .../v1beta1/service_entry_json.gen.go | 781 + .../api/networking/v1beta1/sidecar.gen.json | 179 + .../api/networking/v1beta1/sidecar.pb.go | 2364 + .../api/networking/v1beta1/sidecar.proto | 635 + .../v1beta1/sidecar_deepcopy.gen.go | 499 + .../networking/v1beta1/sidecar_json.gen.go | 456 + .../v1beta1/virtual_service.gen.json | 818 + .../networking/v1beta1/virtual_service.pb.go | 11187 +++++ .../networking/v1beta1/virtual_service.proto | 1839 + .../v1beta1/virtual_service_deepcopy.gen.go | 648 + .../v1beta1/virtual_service_json.gen.go | 425 + .../v1beta1/workload_entry.gen.json | 56 + .../networking/v1beta1/workload_entry.pb.go | 1148 + .../networking/v1beta1/workload_entry.proto | 324 + .../v1beta1/workload_entry_deepcopy.gen.go | 244 + .../v1beta1/workload_entry_json.gen.go | 241 + vendor/istio.io/client-go/LICENSE | 202 + .../pkg/apis/networking/v1beta1/doc.go | 21 + .../apis/networking/v1beta1/register.gen.go | 59 + .../pkg/apis/networking/v1beta1/types.gen.go | 318 + .../v1beta1/zz_generated.deepcopy.gen.go | 389 + vendor/istio.io/gogo-genproto/LICENSE | 202 + .../googleapis/google/api/annotations.pb.go | 56 + .../googleapis/google/api/http.pb.go | 2197 + vendor/k8s.io/api/admission/v1/doc.go | 23 + .../k8s.io/api/admission/v1/generated.pb.go | 1792 + .../k8s.io/api/admission/v1/generated.proto | 167 + vendor/k8s.io/api/admission/v1/register.go | 51 + vendor/k8s.io/api/admission/v1/types.go | 169 + .../v1/types_swagger_doc_generated.go | 78 + .../api/admission/v1/zz_generated.deepcopy.go | 141 + vendor/k8s.io/api/admission/v1beta1/doc.go | 1 + .../api/admission/v1beta1/generated.pb.go | 225 +- .../api/admission/v1beta1/generated.proto | 7 + vendor/k8s.io/api/admission/v1beta1/types.go | 12 + .../v1beta1/types_swagger_doc_generated.go | 1 + .../v1beta1/zz_generated.deepcopy.go | 5 + .../zz_generated.prerelease-lifecycle.go | 49 + .../admissionregistration/v1/generated.pb.go | 60 +- .../admissionregistration/v1/generated.proto | 4 +- .../api/admissionregistration/v1/types.go | 4 +- .../v1/types_swagger_doc_generated.go | 2 +- .../api/admissionregistration/v1beta1/doc.go | 1 + .../v1beta1/generated.pb.go | 60 +- .../v1beta1/generated.proto | 8 +- .../admissionregistration/v1beta1/types.go | 24 +- .../v1beta1/types_swagger_doc_generated.go | 6 +- .../zz_generated.prerelease-lifecycle.go | 121 + vendor/k8s.io/api/apps/v1/generated.pb.go | 60 +- vendor/k8s.io/api/apps/v1beta1/doc.go | 1 + .../k8s.io/api/apps/v1beta1/generated.pb.go | 60 +- vendor/k8s.io/api/apps/v1beta1/types.go | 32 + .../zz_generated.prerelease-lifecycle.go | 217 + vendor/k8s.io/api/apps/v1beta2/doc.go | 1 + .../k8s.io/api/apps/v1beta2/generated.pb.go | 60 +- vendor/k8s.io/api/apps/v1beta2/types.go | 44 + .../zz_generated.prerelease-lifecycle.go | 289 + .../v1alpha1/generated.proto | 162 - .../api/auditregistration/v1alpha1/types.go | 198 - .../v1alpha1/types_swagger_doc_generated.go | 111 - .../api/authentication/v1/generated.pb.go | 60 +- vendor/k8s.io/api/authentication/v1/types.go | 2 +- .../k8s.io/api/authentication/v1beta1/doc.go | 1 + .../authentication/v1beta1/generated.pb.go | 60 +- .../api/authentication/v1beta1/types.go | 5 +- .../zz_generated.prerelease-lifecycle.go | 49 + .../api/authorization/v1/generated.pb.go | 60 +- vendor/k8s.io/api/authorization/v1/types.go | 8 +- .../k8s.io/api/authorization/v1beta1/doc.go | 1 + .../api/authorization/v1beta1/generated.pb.go | 60 +- 
.../k8s.io/api/authorization/v1beta1/types.go | 20 +- .../zz_generated.prerelease-lifecycle.go | 121 + .../k8s.io/api/autoscaling/v1/generated.pb.go | 60 +- vendor/k8s.io/api/autoscaling/v1/types.go | 4 +- vendor/k8s.io/api/autoscaling/v2beta1/doc.go | 1 + .../api/autoscaling/v2beta1/generated.pb.go | 60 +- .../k8s.io/api/autoscaling/v2beta1/types.go | 10 +- .../zz_generated.prerelease-lifecycle.go | 73 + vendor/k8s.io/api/autoscaling/v2beta2/doc.go | 1 + .../api/autoscaling/v2beta2/generated.pb.go | 1298 +- .../api/autoscaling/v2beta2/generated.proto | 66 + .../k8s.io/api/autoscaling/v2beta2/types.go | 94 +- .../v2beta2/types_swagger_doc_generated.go | 33 + .../v2beta2/zz_generated.deepcopy.go | 78 + .../zz_generated.prerelease-lifecycle.go | 57 + vendor/k8s.io/api/batch/v1/generated.pb.go | 60 +- vendor/k8s.io/api/batch/v1beta1/doc.go | 1 + .../k8s.io/api/batch/v1beta1/generated.pb.go | 60 +- vendor/k8s.io/api/batch/v1beta1/types.go | 8 +- .../zz_generated.prerelease-lifecycle.go | 75 + .../k8s.io/api/batch/v2alpha1/generated.pb.go | 60 +- .../v1alpha1 => certificates/v1}/doc.go | 6 +- .../api/certificates/v1/generated.pb.go | 2042 + .../api/certificates/v1/generated.proto | 226 + vendor/k8s.io/api/certificates/v1/register.go | 61 + vendor/k8s.io/api/certificates/v1/types.go | 284 + .../v1/types_swagger_doc_generated.go | 88 + .../certificates/v1/zz_generated.deepcopy.go | 198 + vendor/k8s.io/api/certificates/v1beta1/doc.go | 1 + .../api/certificates/v1beta1/generated.pb.go | 301 +- .../api/certificates/v1beta1/generated.proto | 59 +- .../k8s.io/api/certificates/v1beta1/types.go | 132 +- .../v1beta1/types_swagger_doc_generated.go | 25 +- .../v1beta1/zz_generated.deepcopy.go | 6 + .../zz_generated.prerelease-lifecycle.go | 73 + .../api/coordination/v1/generated.pb.go | 60 +- vendor/k8s.io/api/coordination/v1beta1/doc.go | 1 + .../api/coordination/v1beta1/generated.pb.go | 60 +- .../k8s.io/api/coordination/v1beta1/types.go | 6 + .../zz_generated.prerelease-lifecycle.go | 73 + .../api/core/v1/annotation_key_constants.go | 27 +- vendor/k8s.io/api/core/v1/generated.pb.go | 3719 +- vendor/k8s.io/api/core/v1/generated.proto | 335 +- vendor/k8s.io/api/core/v1/lifecycle.go | 37 + vendor/k8s.io/api/core/v1/resource.go | 8 + vendor/k8s.io/api/core/v1/types.go | 402 +- .../core/v1/types_swagger_doc_generated.go | 158 +- .../k8s.io/api/core/v1/well_known_labels.go | 16 +- .../k8s.io/api/core/v1/well_known_taints.go | 48 + .../api/core/v1/zz_generated.deepcopy.go | 118 +- .../api/discovery/v1alpha1/generated.pb.go | 221 +- .../api/discovery/v1alpha1/generated.proto | 36 +- vendor/k8s.io/api/discovery/v1alpha1/types.go | 47 +- .../v1alpha1/types_swagger_doc_generated.go | 13 +- .../discovery/v1alpha1/well_known_labels.go | 6 + .../v1alpha1/zz_generated.deepcopy.go | 10 +- vendor/k8s.io/api/discovery/v1beta1/doc.go | 23 + .../api/discovery/v1beta1/generated.pb.go | 1704 + .../api/discovery/v1beta1/generated.proto | 157 + .../k8s.io/api/discovery/v1beta1/register.go | 56 + vendor/k8s.io/api/discovery/v1beta1/types.go | 166 + .../v1beta1/types_swagger_doc_generated.go | 86 + .../discovery/v1beta1/well_known_labels.go | 32 + .../v1beta1/zz_generated.deepcopy.go | 195 + .../zz_generated.prerelease-lifecycle.go | 57 + vendor/k8s.io/api/events/v1/doc.go | 23 + vendor/k8s.io/api/events/v1/generated.pb.go | 1406 + vendor/k8s.io/api/events/v1/generated.proto | 125 + vendor/k8s.io/api/events/v1/register.go | 53 + vendor/k8s.io/api/events/v1/types.go | 119 + .../events/v1/types_swagger_doc_generated.go | 72 + 
.../api/events/v1/zz_generated.deepcopy.go | 117 + vendor/k8s.io/api/events/v1beta1/doc.go | 1 + .../k8s.io/api/events/v1beta1/generated.pb.go | 198 +- .../k8s.io/api/events/v1beta1/generated.proto | 47 +- vendor/k8s.io/api/events/v1beta1/types.go | 60 +- .../v1beta1/types_swagger_doc_generated.go | 35 +- .../zz_generated.prerelease-lifecycle.go | 57 + vendor/k8s.io/api/extensions/v1beta1/doc.go | 1 + .../api/extensions/v1beta1/generated.pb.go | 815 +- .../api/extensions/v1beta1/generated.proto | 109 +- .../k8s.io/api/extensions/v1beta1/register.go | 1 - vendor/k8s.io/api/extensions/v1beta1/types.go | 202 +- .../v1beta1/types_swagger_doc_generated.go | 49 +- .../v1beta1/zz_generated.deepcopy.go | 48 +- .../zz_generated.prerelease-lifecycle.go | 349 + vendor/k8s.io/api/flowcontrol/v1alpha1/doc.go | 24 + .../api/flowcontrol/v1alpha1/generated.pb.go | 5433 ++ .../api/flowcontrol/v1alpha1/generated.proto | 434 + .../api/flowcontrol/v1alpha1/register.go | 58 + .../k8s.io/api/flowcontrol/v1alpha1/types.go | 519 + .../v1alpha1/types_swagger_doc_generated.go | 258 + .../v1alpha1/zz_generated.deepcopy.go | 541 + .../k8s.io/api/networking/v1/generated.pb.go | 4410 +- .../k8s.io/api/networking/v1/generated.proto | 280 +- vendor/k8s.io/api/networking/v1/register.go | 4 + vendor/k8s.io/api/networking/v1/types.go | 323 +- .../v1/types_swagger_doc_generated.go | 157 +- .../networking/v1/zz_generated.deepcopy.go | 362 + vendor/k8s.io/api/networking/v1beta1/doc.go | 1 + .../api/networking/v1beta1/generated.pb.go | 1165 +- .../api/networking/v1beta1/generated.proto | 133 +- .../k8s.io/api/networking/v1beta1/register.go | 2 + vendor/k8s.io/api/networking/v1beta1/types.go | 189 +- .../v1beta1/types_swagger_doc_generated.go | 51 +- .../v1beta1/well_known_annotations.go | 32 + .../v1beta1/zz_generated.deepcopy.go | 105 +- .../zz_generated.prerelease-lifecycle.go | 121 + .../k8s.io/api/node/v1alpha1/generated.pb.go | 60 +- vendor/k8s.io/api/node/v1beta1/doc.go | 1 + .../k8s.io/api/node/v1beta1/generated.pb.go | 60 +- vendor/k8s.io/api/node/v1beta1/types.go | 4 + .../zz_generated.prerelease-lifecycle.go | 57 + vendor/k8s.io/api/policy/v1beta1/doc.go | 1 + .../k8s.io/api/policy/v1beta1/generated.pb.go | 302 +- .../k8s.io/api/policy/v1beta1/generated.proto | 20 +- vendor/k8s.io/api/policy/v1beta1/types.go | 35 +- .../v1beta1/types_swagger_doc_generated.go | 16 +- .../zz_generated.prerelease-lifecycle.go | 111 + vendor/k8s.io/api/rbac/v1/generated.pb.go | 60 +- .../k8s.io/api/rbac/v1alpha1/generated.pb.go | 60 +- .../k8s.io/api/rbac/v1alpha1/generated.proto | 15 +- vendor/k8s.io/api/rbac/v1alpha1/types.go | 15 +- .../v1alpha1/types_swagger_doc_generated.go | 18 +- vendor/k8s.io/api/rbac/v1beta1/doc.go | 1 + .../k8s.io/api/rbac/v1beta1/generated.pb.go | 60 +- .../k8s.io/api/rbac/v1beta1/generated.proto | 12 +- vendor/k8s.io/api/rbac/v1beta1/types.go | 44 +- .../v1beta1/types_swagger_doc_generated.go | 16 +- .../zz_generated.prerelease-lifecycle.go | 217 + .../k8s.io/api/scheduling/v1/generated.pb.go | 60 +- .../k8s.io/api/scheduling/v1/generated.proto | 2 +- vendor/k8s.io/api/scheduling/v1/types.go | 2 +- .../v1/types_swagger_doc_generated.go | 2 +- .../api/scheduling/v1alpha1/generated.pb.go | 60 +- .../api/scheduling/v1alpha1/generated.proto | 2 +- .../k8s.io/api/scheduling/v1alpha1/types.go | 2 +- .../v1alpha1/types_swagger_doc_generated.go | 2 +- vendor/k8s.io/api/scheduling/v1beta1/doc.go | 1 + .../api/scheduling/v1beta1/generated.pb.go | 60 +- .../api/scheduling/v1beta1/generated.proto | 2 +- 
vendor/k8s.io/api/scheduling/v1beta1/types.go | 10 +- .../v1beta1/types_swagger_doc_generated.go | 2 +- .../zz_generated.prerelease-lifecycle.go | 73 + .../api/settings/v1alpha1/generated.pb.go | 60 +- vendor/k8s.io/api/storage/v1/generated.pb.go | 2980 +- vendor/k8s.io/api/storage/v1/generated.proto | 203 + vendor/k8s.io/api/storage/v1/register.go | 6 + vendor/k8s.io/api/storage/v1/types.go | 281 +- .../storage/v1/types_swagger_doc_generated.go | 83 + .../api/storage/v1/zz_generated.deepcopy.go | 231 + .../api/storage/v1alpha1/generated.pb.go | 767 +- .../api/storage/v1alpha1/generated.proto | 75 + .../k8s.io/api/storage/v1alpha1/register.go | 2 + vendor/k8s.io/api/storage/v1alpha1/types.go | 82 + .../v1alpha1/types_swagger_doc_generated.go | 22 + .../storage/v1alpha1/zz_generated.deepcopy.go | 74 +- vendor/k8s.io/api/storage/v1beta1/doc.go | 1 + .../api/storage/v1beta1/generated.pb.go | 314 +- .../api/storage/v1beta1/generated.proto | 30 + vendor/k8s.io/api/storage/v1beta1/types.go | 82 + .../v1beta1/types_swagger_doc_generated.go | 4 +- .../storage/v1beta1/zz_generated.deepcopy.go | 10 + .../zz_generated.prerelease-lifecycle.go | 217 + .../pkg/apis/apiextensions/deepcopy.go | 6 + .../pkg/apis/apiextensions/types.go | 9 + .../apis/apiextensions/types_jsonschema.go | 15 +- .../apiextensions/v1/.import-restrictions | 5 + .../pkg/apis/apiextensions/v1/conversion.go | 14 - .../pkg/apis/apiextensions/v1/deepcopy.go | 6 + .../pkg/apis/apiextensions/v1/generated.pb.go | 554 +- .../pkg/apis/apiextensions/v1/generated.proto | 58 +- .../pkg/apis/apiextensions/v1/register.go | 4 +- .../pkg/apis/apiextensions/v1/types.go | 15 +- .../apis/apiextensions/v1/types_jsonschema.go | 47 +- .../v1/zz_generated.conversion.go | 41 +- .../apiextensions/v1/zz_generated.deepcopy.go | 5 + .../v1beta1/.import-restrictions | 5 + .../apis/apiextensions/v1beta1/conversion.go | 14 - .../apis/apiextensions/v1beta1/deepcopy.go | 6 + .../pkg/apis/apiextensions/v1beta1/doc.go | 1 + .../apiextensions/v1beta1/generated.pb.go | 558 +- .../apiextensions/v1beta1/generated.proto | 58 +- .../apis/apiextensions/v1beta1/register.go | 4 +- .../pkg/apis/apiextensions/v1beta1/types.go | 28 +- .../apiextensions/v1beta1/types_jsonschema.go | 47 +- .../v1beta1/zz_generated.conversion.go | 21 +- .../v1beta1/zz_generated.deepcopy.go | 5 + .../zz_generated.prerelease-lifecycle.go | 97 + .../apiextensions/zz_generated.deepcopy.go | 5 + .../client/clientset/clientset/clientset.go | 2 +- .../v1/customresourcedefinition.go | 72 +- .../v1beta1/customresourcedefinition.go | 72 +- .../k8s.io/apimachinery/pkg/api/errors/OWNERS | 3 - .../apimachinery/pkg/api/errors/errors.go | 105 +- .../k8s.io/apimachinery/pkg/api/meta/OWNERS | 6 - .../apimachinery/pkg/api/meta/conditions.go | 101 + .../k8s.io/apimachinery/pkg/api/meta/meta.go | 2 +- .../apimachinery/pkg/api/resource/OWNERS | 5 - .../pkg/api/resource/generated.pb.go | 2 +- .../apimachinery/pkg/api/resource/math.go | 8 +- .../apimachinery/pkg/api/resource/quantity.go | 5 + .../pkg/apis/meta/internalversion/register.go | 37 +- .../apis/meta/internalversion/scheme/doc.go | 17 + .../meta/internalversion/scheme/register.go | 39 + .../pkg/apis/meta/internalversion/types.go | 16 +- .../zz_generated.conversion.go | 2 + .../apimachinery/pkg/apis/meta/v1/OWNERS | 2 - .../pkg/apis/meta/v1/controller_ref.go | 19 +- .../pkg/apis/meta/v1/conversion.go | 156 +- .../apimachinery/pkg/apis/meta/v1/doc.go | 1 + .../apimachinery/pkg/apis/meta/v1/duration.go | 5 + .../pkg/apis/meta/v1/generated.pb.go | 890 +- 
.../pkg/apis/meta/v1/generated.proto | 114 +- .../apimachinery/pkg/apis/meta/v1/helpers.go | 4 +- .../apimachinery/pkg/apis/meta/v1/meta.go | 4 +- .../apimachinery/pkg/apis/meta/v1/register.go | 80 +- .../apimachinery/pkg/apis/meta/v1/time.go | 10 + .../apimachinery/pkg/apis/meta/v1/types.go | 143 +- .../meta/v1/types_swagger_doc_generated.go | 39 +- .../pkg/apis/meta/v1/unstructured/helpers.go | 54 +- .../apis/meta/v1/zz_generated.conversion.go | 535 + .../pkg/apis/meta/v1/zz_generated.deepcopy.go | 17 + .../pkg/apis/meta/v1beta1/conversion.go | 27 +- .../pkg/apis/meta/v1beta1/generated.pb.go | 101 +- .../pkg/apis/meta/v1beta1/generated.proto | 1 - .../pkg/apis/meta/v1beta1/register.go | 35 +- .../apimachinery/pkg/conversion/converter.go | 165 +- .../apimachinery/pkg/fields/selector.go | 16 +- .../k8s.io/apimachinery/pkg/labels/labels.go | 14 +- .../apimachinery/pkg/labels/selector.go | 56 +- .../k8s.io/apimachinery/pkg/runtime/codec.go | 92 +- .../apimachinery/pkg/runtime/codec_check.go | 10 +- .../apimachinery/pkg/runtime/conversion.go | 129 +- .../apimachinery/pkg/runtime/converter.go | 124 +- .../apimachinery/pkg/runtime/embedded.go | 15 +- .../apimachinery/pkg/runtime/generated.pb.go | 60 +- .../apimachinery/pkg/runtime/interfaces.go | 66 + .../apimachinery/pkg/runtime/negotiate.go | 146 + .../apimachinery/pkg/runtime/register.go | 30 - .../pkg/runtime/schema/generated.pb.go | 2 +- .../pkg/runtime/schema/group_version.go | 23 +- .../k8s.io/apimachinery/pkg/runtime/scheme.go | 58 +- .../pkg/runtime/serializer/json/json.go | 68 +- .../runtime/serializer/protobuf/protobuf.go | 59 +- .../serializer/versioning/versioning.go | 82 +- .../k8s.io/apimachinery/pkg/runtime/types.go | 13 - .../pkg/runtime/zz_generated.deepcopy.go | 33 - .../apimachinery/pkg/util/cache/cache.go | 83 - .../apimachinery/pkg/util/cache/expiring.go | 192 + .../apimachinery/pkg/util/clock/clock.go | 29 +- .../k8s.io/apimachinery/pkg/util/diff/diff.go | 39 + .../apimachinery/pkg/util/errors/errors.go | 32 +- .../apimachinery/pkg/util/httpstream/doc.go | 19 + .../pkg/util/httpstream/httpstream.go | 157 + .../pkg/util/httpstream/spdy/connection.go | 145 + .../pkg/util/httpstream/spdy/roundtripper.go | 332 + .../pkg/util/httpstream/spdy/upgrade.go | 107 + .../pkg/util/intstr/generated.pb.go | 60 +- .../apimachinery/pkg/util/intstr/intstr.go | 5 +- .../k8s.io/apimachinery/pkg/util/json/json.go | 25 + .../pkg/util/naming/from_stack.go | 2 +- .../k8s.io/apimachinery/pkg/util/net/http.go | 352 +- .../apimachinery/pkg/util/net/interface.go | 75 +- .../k8s.io/apimachinery/pkg/util/net/util.go | 31 +- .../pkg/util/remotecommand/constants.go | 53 + .../apimachinery/pkg/util/runtime/runtime.go | 2 +- .../pkg/util/strategicpatch/patch.go | 6 +- .../pkg/util/validation/field/errors.go | 2 +- .../pkg/util/validation/validation.go | 89 +- .../k8s.io/apimachinery/pkg/util/wait/wait.go | 150 +- .../apimachinery/pkg/util/yaml/decoder.go | 6 +- .../apimachinery/pkg/watch/streamwatcher.go | 4 +- vendor/k8s.io/apimachinery/pkg/watch/watch.go | 18 +- .../third_party/forked/golang/netutil/addr.go | 27 + .../forked/golang/reflect/deep_equal.go | 4 +- vendor/k8s.io/autoscaler/LICENSE | 202 + .../apis/autoscaling.k8s.io/v1beta2/doc.go | 21 + .../autoscaling.k8s.io/v1beta2}/register.go | 20 +- .../apis/autoscaling.k8s.io/v1beta2/types.go | 339 + .../v1beta2/zz_generated.deepcopy.go | 432 + .../discovery/cached/memory/memcache.go | 43 +- .../client-go/discovery/discovery_client.go | 22 +- vendor/k8s.io/client-go/dynamic/interface.go | 20 +- 
vendor/k8s.io/client-go/dynamic/scheme.go | 78 +- vendor/k8s.io/client-go/dynamic/simple.go | 70 +- .../k8s.io/client-go/kubernetes/clientset.go | 72 +- .../client-go/kubernetes/scheme/register.go | 10 +- .../v1/mutatingwebhookconfiguration.go | 64 +- .../v1/validatingwebhookconfiguration.go | 64 +- .../v1beta1/mutatingwebhookconfiguration.go | 64 +- .../v1beta1/validatingwebhookconfiguration.go | 64 +- .../typed/apps/v1/controllerrevision.go | 64 +- .../kubernetes/typed/apps/v1/daemonset.go | 72 +- .../kubernetes/typed/apps/v1/deployment.go | 85 +- .../kubernetes/typed/apps/v1/replicaset.go | 85 +- .../kubernetes/typed/apps/v1/statefulset.go | 85 +- .../typed/apps/v1beta1/controllerrevision.go | 64 +- .../typed/apps/v1beta1/deployment.go | 72 +- .../typed/apps/v1beta1/statefulset.go | 72 +- .../typed/apps/v1beta2/controllerrevision.go | 64 +- .../typed/apps/v1beta2/daemonset.go | 72 +- .../typed/apps/v1beta2/deployment.go | 72 +- .../typed/apps/v1beta2/replicaset.go | 72 +- .../typed/apps/v1beta2/statefulset.go | 85 +- .../auditregistration/v1alpha1/auditsink.go | 164 - .../authentication/v1/generated_expansion.go | 2 + .../typed/authentication/v1/tokenreview.go | 18 + .../v1beta1/generated_expansion.go | 2 + .../authentication/v1beta1/tokenreview.go | 18 + .../authorization/v1/generated_expansion.go | 8 + .../v1/localsubjectaccessreview.go | 19 + .../v1/localsubjectaccessreview_expansion.go | 36 - .../v1/selfsubjectaccessreview.go | 18 + .../v1/selfsubjectaccessreview_expansion.go | 35 - .../v1/selfsubjectrulesreview.go | 18 + .../v1/selfsubjectrulesreview_expansion.go | 35 - .../authorization/v1/subjectaccessreview.go | 18 + .../v1/subjectaccessreview_expansion.go | 36 - .../v1beta1/generated_expansion.go | 8 + .../v1beta1/localsubjectaccessreview.go | 19 + .../localsubjectaccessreview_expansion.go | 36 - .../v1beta1/selfsubjectaccessreview.go | 18 + .../selfsubjectaccessreview_expansion.go | 35 - .../v1beta1/selfsubjectrulesreview.go | 18 + .../v1beta1/subjectaccessreview.go | 18 + .../v1beta1/subjectaccessreview_expansion.go | 36 - .../autoscaling/v1/horizontalpodautoscaler.go | 72 +- .../v2beta1/horizontalpodautoscaler.go | 72 +- .../v2beta2/horizontalpodautoscaler.go | 72 +- .../kubernetes/typed/batch/v1/job.go | 72 +- .../kubernetes/typed/batch/v1beta1/cronjob.go | 72 +- .../typed/batch/v2alpha1/cronjob.go | 72 +- .../v1/certificates_client.go} | 36 +- .../v1/certificatesigningrequest.go | 200 + .../kubernetes/typed/certificates/v1/doc.go | 20 + .../certificates/v1/generated_expansion.go | 21 + .../v1beta1/certificatesigningrequest.go | 72 +- .../certificatesigningrequest_expansion.go | 11 +- .../kubernetes/typed/coordination/v1/lease.go | 64 +- .../typed/coordination/v1beta1/lease.go | 64 +- .../typed/core/v1/componentstatus.go | 64 +- .../kubernetes/typed/core/v1/configmap.go | 64 +- .../kubernetes/typed/core/v1/endpoints.go | 64 +- .../kubernetes/typed/core/v1/event.go | 64 +- .../typed/core/v1/event_expansion.go | 21 +- .../typed/core/v1/generated_expansion.go | 2 + .../kubernetes/typed/core/v1/limitrange.go | 64 +- .../kubernetes/typed/core/v1/namespace.go | 58 +- .../typed/core/v1/namespace_expansion.go | 14 +- .../kubernetes/typed/core/v1/node.go | 72 +- .../typed/core/v1/node_expansion.go | 10 +- .../typed/core/v1/persistentvolume.go | 72 +- .../typed/core/v1/persistentvolumeclaim.go | 72 +- .../client-go/kubernetes/typed/core/v1/pod.go | 85 +- .../kubernetes/typed/core/v1/pod_expansion.go | 33 +- .../kubernetes/typed/core/v1/podtemplate.go | 64 +- 
.../typed/core/v1/replicationcontroller.go | 85 +- .../kubernetes/typed/core/v1/resourcequota.go | 72 +- .../kubernetes/typed/core/v1/secret.go | 64 +- .../kubernetes/typed/core/v1/service.go | 58 +- .../typed/core/v1/serviceaccount.go | 82 +- .../typed/core/v1/serviceaccount_expansion.go | 41 - .../typed/discovery/v1alpha1/endpointslice.go | 64 +- .../discovery/v1beta1/discovery_client.go | 89 + .../kubernetes/typed/discovery/v1beta1/doc.go | 20 + .../typed/discovery/v1beta1/endpointslice.go | 178 + .../discovery/v1beta1/generated_expansion.go | 21 + .../kubernetes/typed/events/v1/doc.go | 20 + .../kubernetes/typed/events/v1/event.go | 178 + .../typed/events/v1/events_client.go | 89 + .../v1}/generated_expansion.go | 4 +- .../kubernetes/typed/events/v1beta1/event.go | 64 +- .../typed/events/v1beta1/event_expansion.go | 7 +- .../typed/extensions/v1beta1/daemonset.go | 72 +- .../typed/extensions/v1beta1/deployment.go | 85 +- .../v1beta1/deployment_expansion.go | 14 +- .../typed/extensions/v1beta1/ingress.go | 72 +- .../typed/extensions/v1beta1/networkpolicy.go | 64 +- .../extensions/v1beta1/podsecuritypolicy.go | 64 +- .../typed/extensions/v1beta1/replicaset.go | 85 +- .../v1alpha1/doc.go | 0 .../v1alpha1/flowcontrol_client.go | 94 + .../typed/flowcontrol/v1alpha1/flowschema.go | 184 + .../v1alpha1/generated_expansion.go | 23 + .../v1alpha1/prioritylevelconfiguration.go | 184 + .../networking/v1/generated_expansion.go | 4 + .../kubernetes/typed/networking/v1/ingress.go | 195 + .../typed/networking/v1/ingressclass.go | 168 + .../typed/networking/v1/networking_client.go | 10 + .../typed/networking/v1/networkpolicy.go | 64 +- .../networking/v1beta1/generated_expansion.go | 2 + .../typed/networking/v1beta1/ingress.go | 72 +- .../typed/networking/v1beta1/ingressclass.go | 168 + .../networking/v1beta1/networking_client.go | 5 + .../typed/node/v1alpha1/runtimeclass.go | 64 +- .../typed/node/v1beta1/runtimeclass.go | 64 +- .../policy/v1beta1/eviction_expansion.go | 8 +- .../policy/v1beta1/poddisruptionbudget.go | 72 +- .../typed/policy/v1beta1/podsecuritypolicy.go | 64 +- .../kubernetes/typed/rbac/v1/clusterrole.go | 64 +- .../typed/rbac/v1/clusterrolebinding.go | 64 +- .../kubernetes/typed/rbac/v1/role.go | 64 +- .../kubernetes/typed/rbac/v1/rolebinding.go | 64 +- .../typed/rbac/v1alpha1/clusterrole.go | 64 +- .../typed/rbac/v1alpha1/clusterrolebinding.go | 64 +- .../kubernetes/typed/rbac/v1alpha1/role.go | 64 +- .../typed/rbac/v1alpha1/rolebinding.go | 64 +- .../typed/rbac/v1beta1/clusterrole.go | 64 +- .../typed/rbac/v1beta1/clusterrolebinding.go | 64 +- .../kubernetes/typed/rbac/v1beta1/role.go | 64 +- .../typed/rbac/v1beta1/rolebinding.go | 64 +- .../typed/scheduling/v1/priorityclass.go | 64 +- .../scheduling/v1alpha1/priorityclass.go | 64 +- .../typed/scheduling/v1beta1/priorityclass.go | 64 +- .../typed/settings/v1alpha1/podpreset.go | 64 +- .../kubernetes/typed/storage/v1/csidriver.go | 168 + .../kubernetes/typed/storage/v1/csinode.go | 168 + .../typed/storage/v1/generated_expansion.go | 4 + .../typed/storage/v1/storage_client.go | 10 + .../typed/storage/v1/storageclass.go | 64 +- .../typed/storage/v1/volumeattachment.go | 72 +- .../storage/v1alpha1/csistoragecapacity.go | 178 + .../storage/v1alpha1/generated_expansion.go | 2 + .../typed/storage/v1alpha1/storage_client.go | 5 + .../storage/v1alpha1/volumeattachment.go | 72 +- .../typed/storage/v1beta1/csidriver.go | 64 +- .../typed/storage/v1beta1/csinode.go | 64 +- .../typed/storage/v1beta1/storageclass.go | 64 +- 
.../typed/storage/v1beta1/volumeattachment.go | 72 +- vendor/k8s.io/client-go/metadata/interface.go | 49 + vendor/k8s.io/client-go/metadata/metadata.go | 307 + .../v1beta1/zz_generated.conversion.go | 5 - vendor/k8s.io/client-go/pkg/version/def.bzl | 2 +- .../plugin/pkg/client/auth/exec/exec.go | 148 +- .../plugin/pkg/client/auth/exec/metrics.go | 60 + .../plugin/pkg/client/auth/gcp/gcp.go | 2 +- vendor/k8s.io/client-go/rest/OWNERS | 1 - vendor/k8s.io/client-go/rest/client.go | 145 +- vendor/k8s.io/client-go/rest/config.go | 95 +- vendor/k8s.io/client-go/rest/plugin.go | 2 +- vendor/k8s.io/client-go/rest/request.go | 457 +- vendor/k8s.io/client-go/rest/transport.go | 3 +- vendor/k8s.io/client-go/rest/urlbackoff.go | 2 +- vendor/k8s.io/client-go/rest/warnings.go | 144 + .../k8s.io/client-go/restmapper/discovery.go | 2 +- .../k8s.io/client-go/restmapper/shortcut.go | 2 +- vendor/k8s.io/client-go/tools/cache/OWNERS | 8 - .../client-go/tools/cache/controller.go | 81 +- .../client-go/tools/cache/delta_fifo.go | 221 +- .../client-go/tools/cache/expiration_cache.go | 6 +- vendor/k8s.io/client-go/tools/cache/fifo.go | 71 +- vendor/k8s.io/client-go/tools/cache/index.go | 17 +- .../k8s.io/client-go/tools/cache/listers.go | 2 +- .../k8s.io/client-go/tools/cache/listwatch.go | 10 +- .../client-go/tools/cache/mutation_cache.go | 2 +- .../tools/cache/mutation_detector.go | 52 +- .../k8s.io/client-go/tools/cache/reflector.go | 314 +- .../tools/cache/reflector_metrics.go | 13 - .../client-go/tools/cache/shared_informer.go | 267 +- vendor/k8s.io/client-go/tools/cache/store.go | 46 +- .../tools/cache/thread_safe_store.go | 45 +- .../client-go/tools/clientcmd/api/helpers.go | 3 + .../client-go/tools/clientcmd/api/types.go | 26 +- .../tools/clientcmd/api/v1/conversion.go | 346 +- .../client-go/tools/clientcmd/api/v1/doc.go | 1 + .../tools/clientcmd/api/v1/register.go | 2 +- .../client-go/tools/clientcmd/api/v1/types.go | 23 +- .../api/v1/zz_generated.conversion.go | 430 + .../tools/clientcmd/client_config.go | 103 +- .../client-go/tools/clientcmd/config.go | 29 +- .../client-go/tools/clientcmd/helpers.go | 15 + .../client-go/tools/clientcmd/loader.go | 2 +- .../tools/clientcmd/merged_client_builder.go | 37 +- .../client-go/tools/clientcmd/overrides.go | 4 + .../client-go/tools/clientcmd/validation.go | 61 +- .../client-go/tools/leaderelection/OWNERS | 2 - .../tools/leaderelection/leaderelection.go | 47 +- .../resourcelock/configmaplock.go | 35 +- .../resourcelock/endpointslock.go | 35 +- .../leaderelection/resourcelock/interface.go | 68 +- .../leaderelection/resourcelock/leaselock.go | 59 +- .../leaderelection/resourcelock/multilock.go | 104 + vendor/k8s.io/client-go/tools/metrics/OWNERS | 3 - .../k8s.io/client-go/tools/metrics/metrics.go | 52 +- vendor/k8s.io/client-go/tools/pager/pager.go | 36 +- .../k8s.io/client-go/tools/portforward/doc.go | 19 + .../tools/portforward/portforward.go | 429 + vendor/k8s.io/client-go/tools/record/OWNERS | 1 - vendor/k8s.io/client-go/tools/record/doc.go | 3 +- vendor/k8s.io/client-go/tools/record/event.go | 57 +- .../client-go/tools/record/events_cache.go | 5 +- vendor/k8s.io/client-go/tools/record/fake.go | 6 +- .../client-go/tools/remotecommand/doc.go | 20 + .../tools/remotecommand/errorstream.go | 55 + .../client-go/tools/remotecommand/reader.go | 41 + .../tools/remotecommand/remotecommand.go | 142 + .../remotecommand/resize.go} | 26 +- .../client-go/tools/remotecommand/v1.go | 160 + .../client-go/tools/remotecommand/v2.go | 195 + .../client-go/tools/remotecommand/v3.go | 
111 + .../client-go/tools/remotecommand/v4.go | 119 + vendor/k8s.io/client-go/transport/cache.go | 81 +- .../client-go/transport/cert_rotation.go | 176 + vendor/k8s.io/client-go/transport/config.go | 15 +- .../client-go/transport/round_trippers.go | 11 +- .../k8s.io/client-go/transport/spdy/spdy.go | 98 + .../client-go/transport/token_source.go | 2 +- .../k8s.io/client-go/transport/transport.go | 62 +- vendor/k8s.io/client-go/util/cert/cert.go | 6 +- vendor/k8s.io/client-go/util/cert/io.go | 17 +- vendor/k8s.io/client-go/util/cert/pem.go | 12 + .../client-go/util/cert/server_inspection.go | 102 + vendor/k8s.io/client-go/util/exec/exec.go | 52 + .../client-go/util/jsonpath/jsonpath.go | 92 +- .../k8s.io/client-go/util/jsonpath/parser.go | 5 +- vendor/k8s.io/client-go/util/retry/util.go | 65 +- .../util/workqueue/delaying_queue.go | 31 +- .../client-go/util/workqueue/metrics.go | 11 +- .../client-go/util/workqueue/parallelizer.go | 68 +- vendor/k8s.io/component-base/LICENSE | 202 + vendor/k8s.io/component-base/config/OWNERS | 14 + .../component-base/config/doc.go} | 10 +- vendor/k8s.io/component-base/config/types.go | 88 + .../config/v1alpha1/conversion.go | 61 + .../config/v1alpha1/defaults.go | 112 + .../component-base/config/v1alpha1/doc.go | 20 + .../config/v1alpha1/register.go | 31 + .../component-base/config/v1alpha1/types.go | 90 + .../v1alpha1/zz_generated.conversion.go | 152 + .../config/v1alpha1/zz_generated.deepcopy.go | 103 + .../config/zz_generated.deepcopy.go | 88 + vendor/k8s.io/klog/v2/.gitignore | 17 + vendor/k8s.io/klog/v2/CONTRIBUTING.md | 22 + vendor/k8s.io/klog/v2/LICENSE | 191 + vendor/k8s.io/klog/v2/OWNERS | 19 + vendor/k8s.io/klog/v2/README.md | 99 + vendor/k8s.io/klog/v2/RELEASE.md | 9 + vendor/k8s.io/klog/v2/SECURITY_CONTACTS | 20 + vendor/k8s.io/klog/v2/code-of-conduct.md | 3 + vendor/k8s.io/klog/v2/go.mod | 5 + vendor/k8s.io/klog/v2/go.sum | 2 + vendor/k8s.io/klog/v2/klog.go | 1525 + vendor/k8s.io/klog/v2/klog_file.go | 164 + vendor/k8s.io/kube-aggregator/LICENSE | 202 + .../pkg/apis/apiregistration/doc.go} | 22 +- .../pkg/apis/apiregistration/helpers.go | 128 + .../pkg/apis/apiregistration/register.go | 54 + .../pkg/apis/apiregistration/types.go | 146 + .../pkg/apis/apiregistration/v1/defaults.go} | 22 +- .../pkg/apis/apiregistration/v1/doc.go | 37 + .../apis/apiregistration/v1}/generated.pb.go | 1410 +- .../apis/apiregistration/v1/generated.proto | 144 + .../pkg/apis/apiregistration/v1/register.go | 61 + .../pkg/apis/apiregistration/v1/types.go | 155 + .../v1/zz_generated.conversion.go | 298 + .../v1/zz_generated.deepcopy.go | 173 + .../v1/zz_generated.defaults.go | 47 + .../apis/apiregistration/v1beta1/defaults.go | 33 + .../pkg/apis/apiregistration/v1beta1/doc.go | 38 + .../apiregistration/v1beta1/generated.pb.go | 1832 + .../apiregistration/v1beta1/generated.proto | 144 + .../apis/apiregistration/v1beta1/register.go | 61 + .../pkg/apis/apiregistration/v1beta1/types.go | 161 + .../v1beta1/zz_generated.conversion.go | 298 + .../v1beta1/zz_generated.deepcopy.go | 173 + .../v1beta1/zz_generated.defaults.go | 47 + .../zz_generated.prerelease-lifecycle.go | 73 + .../apiregistration}/zz_generated.deepcopy.go | 183 +- .../clientset/clientset.go | 111 + .../clientset_generated/clientset/doc.go | 20 + .../clientset/scheme/doc.go | 20 + .../clientset/scheme/register.go | 58 + .../v1/apiregistration_client.go | 89 + .../typed/apiregistration/v1/apiservice.go | 184 + .../clientset/typed/apiregistration/v1/doc.go | 20 + .../apiregistration/v1/generated_expansion.go | 21 + 
.../v1beta1/apiregistration_client.go | 89 + .../apiregistration/v1beta1/apiservice.go | 184 + .../typed/apiregistration/v1beta1/doc.go | 20 + .../v1beta1/generated_expansion.go | 21 + .../kube-openapi/pkg/util/proto/document.go | 2 +- vendor/k8s.io/utils/pointer/pointer.go | 49 +- vendor/k8s.io/utils/trace/README.md | 67 + vendor/k8s.io/utils/trace/trace.go | 237 +- vendor/modules.txt | 294 +- .../sigs.k8s.io/controller-runtime/.gitignore | 3 + .../controller-runtime/.golangci.yml | 30 + .../controller-runtime/CONTRIBUTING.md | 10 +- .../sigs.k8s.io/controller-runtime/Makefile | 110 + vendor/sigs.k8s.io/controller-runtime/OWNERS | 11 +- .../controller-runtime/OWNERS_ALIASES | 35 +- .../sigs.k8s.io/controller-runtime/README.md | 9 +- .../controller-runtime/TMP-LOGGING.md | 5 +- .../controller-runtime/VERSIONING.md | 279 +- .../sigs.k8s.io/controller-runtime/alias.go | 20 + vendor/sigs.k8s.io/controller-runtime/doc.go | 7 +- vendor/sigs.k8s.io/controller-runtime/go.mod | 51 +- vendor/sigs.k8s.io/controller-runtime/go.sum | 541 +- .../pkg/builder/controller.go | 210 +- .../controller-runtime/pkg/builder/options.go | 117 + .../controller-runtime/pkg/builder/webhook.go | 1 + .../controller-runtime/pkg/cache/cache.go | 16 +- .../pkg/cache/informer_cache.go | 73 +- .../pkg/cache/internal/cache_reader.go | 4 +- .../pkg/cache/internal/deleg_map.go | 60 +- .../pkg/cache/internal/informers_map.go | 110 +- .../pkg/cache/multi_namespace_cache.go | 32 +- .../pkg/client/apiutil/apimachinery.go | 62 +- .../pkg/client/apiutil/dynamicrestmapper.go | 63 +- .../controller-runtime/pkg/client/client.go | 149 +- .../pkg/client/client_cache.go | 46 +- .../controller-runtime/pkg/client/codec.go | 24 + .../pkg/client/config/config.go | 30 +- .../controller-runtime/pkg/client/dryrun.go | 106 + .../pkg/client/interfaces.go | 35 +- .../pkg/client/metadata_client.go | 193 + .../pkg/client/namespaced_client.go | 253 + .../controller-runtime/pkg/client/object.go | 77 + .../controller-runtime/pkg/client/options.go | 94 +- .../controller-runtime/pkg/client/patch.go | 99 +- .../controller-runtime/pkg/client/split.go | 91 +- .../pkg/client/typed_client.go | 51 +- .../pkg/client/unstructured_client.go | 225 +- .../controller-runtime/pkg/config/config.go | 114 + .../controller-runtime/pkg/config/doc.go | 25 + .../pkg/config/v1alpha1/doc.go | 20 + .../pkg/config/v1alpha1/register.go | 37 + .../pkg/config/v1alpha1/types.go | 127 + .../config/v1alpha1/zz_generated.deepcopy.go | 119 + .../pkg/controller/controller.go | 57 +- .../controllerutil/controllerutil.go | 281 +- .../controller-runtime/pkg/envtest/crd.go | 217 +- .../controller-runtime/pkg/envtest/ginkgo.go | 11 - .../controller-runtime/pkg/envtest/helper.go | 49 +- .../pkg/envtest/printer/prow.go | 109 + .../controller-runtime/pkg/envtest/server.go | 99 +- .../controller-runtime/pkg/envtest/webhook.go | 448 + .../controller-runtime/pkg/event/event.go | 30 +- .../controller-runtime/pkg/handler/enqueue.go | 30 +- .../pkg/handler/enqueue_mapped.go | 70 +- .../pkg/handler/enqueue_owner.go | 10 +- .../pkg/internal/controller/controller.go | 177 +- .../internal/controller/metrics/metrics.go | 18 + .../pkg/internal/log/log.go | 3 - .../pkg/internal/recorder/recorder.go | 128 +- .../internal/testing}/integration/.gitignore | 0 .../internal/testing/integration/README.md | 10 + .../testing/integration/addr/manager.go | 74 + .../testing}/integration/apiserver.go | 51 +- .../testing}/integration/control_plane.go | 33 +- .../pkg/internal/testing}/integration/doc.go | 9 +- 
.../pkg/internal/testing}/integration/etcd.go | 6 +- .../testing/integration/internal/apiserver.go | 27 + .../integration/internal/arguments.go | 1 + .../integration/internal/bin_path_finder.go | 0 .../testing}/integration/internal/etcd.go | 11 +- .../testing}/integration/internal/process.go | 27 +- .../testing/integration/internal/tinyca.go | 151 + .../internal/testing}/integration/kubectl.go | 2 +- .../pkg/leaderelection/doc.go | 2 +- .../pkg/leaderelection/leader_election.go | 34 +- .../controller-runtime/pkg/log/log.go | 55 +- .../controller-runtime/pkg/log/zap/flags.go | 131 + .../pkg/log/zap/kube_helpers.go | 4 +- .../controller-runtime/pkg/log/zap/zap.go | 164 +- .../pkg/manager/client_builder.go | 61 + .../pkg/manager/internal.go | 451 +- .../controller-runtime/pkg/manager/manager.go | 329 +- .../pkg/manager/signals/signal.go | 10 +- .../pkg/metrics/client_go_adapter.go | 88 +- .../pkg/metrics/listener.go | 6 +- .../pkg/metrics/registry.go | 9 +- .../pkg/metrics/workqueue.go | 162 +- .../pkg/predicate/predicate.go | 186 +- .../controller-runtime/pkg/ratelimiter/doc.go | 22 + .../pkg/ratelimiter/ratelimiter.go | 30 + .../pkg/reconcile/reconcile.go | 17 +- .../pkg/runtime/inject/inject.go | 4 +- .../controller-runtime/pkg/scheme/scheme.go | 14 +- .../pkg/source/internal/eventsource.go | 65 +- .../controller-runtime/pkg/source/source.go | 74 +- .../pkg/webhook/admission/http.go | 66 +- .../pkg/webhook/admission/multi.go | 17 +- .../pkg/webhook/admission/response.go | 39 +- .../pkg/webhook/admission/validator.go | 22 +- .../pkg/webhook/admission/webhook.go | 10 +- .../pkg/webhook/conversion/conversion.go | 12 +- .../internal/certwatcher/certwatcher.go | 9 +- .../pkg/webhook/internal/metrics/metrics.go | 36 +- .../controller-runtime/pkg/webhook/server.go | 152 +- .../v4}/LICENSE | 2 +- .../v4/value/allocator.go | 203 + .../structured-merge-diff/v4/value/doc.go | 21 + .../structured-merge-diff/v4/value/fields.go | 97 + .../v4/value/jsontagutil.go | 91 + .../structured-merge-diff/v4/value/list.go | 139 + .../v4/value/listreflect.go | 98 + .../v4/value/listunstructured.go | 74 + .../structured-merge-diff/v4/value/map.go | 270 + .../v4/value/mapreflect.go | 209 + .../v4/value/mapunstructured.go | 190 + .../v4/value/reflectcache.go | 463 + .../structured-merge-diff/v4/value/scalar.go | 50 + .../v4/value/structreflect.go | 208 + .../structured-merge-diff/v4/value/value.go | 347 + .../v4/value/valuereflect.go | 294 + .../v4/value/valueunstructured.go | 178 + .../testing_frameworks/integration/README.md | 8 - .../integration/addr/manager.go | 24 - .../integration/internal/apiserver.go | 17 - vendor/sigs.k8s.io/yaml/.travis.yml | 15 +- vendor/sigs.k8s.io/yaml/OWNERS | 2 + vendor/sigs.k8s.io/yaml/README.md | 14 +- vendor/sigs.k8s.io/yaml/go.mod | 8 + vendor/sigs.k8s.io/yaml/go.sum | 9 + vendor/sigs.k8s.io/yaml/yaml.go | 61 + 1892 files changed, 330120 insertions(+), 96000 deletions(-) create mode 100644 vendor/github.com/docker/spdystream/CONTRIBUTING.md create mode 100644 vendor/github.com/docker/spdystream/LICENSE create mode 100644 vendor/github.com/docker/spdystream/LICENSE.docs create mode 100644 vendor/github.com/docker/spdystream/MAINTAINERS create mode 100644 vendor/github.com/docker/spdystream/README.md create mode 100644 vendor/github.com/docker/spdystream/connection.go create mode 100644 vendor/github.com/docker/spdystream/handlers.go create mode 100644 vendor/github.com/docker/spdystream/priority.go create mode 100644 vendor/github.com/docker/spdystream/spdy/dictionary.go create mode 
100644 vendor/github.com/docker/spdystream/spdy/read.go create mode 100644 vendor/github.com/docker/spdystream/spdy/types.go create mode 100644 vendor/github.com/docker/spdystream/spdy/write.go create mode 100644 vendor/github.com/docker/spdystream/stream.go create mode 100644 vendor/github.com/docker/spdystream/utils.go create mode 100644 vendor/github.com/fsnotify/fsnotify/.editorconfig create mode 100644 vendor/github.com/fsnotify/fsnotify/.gitattributes rename vendor/{gopkg.in/fsnotify.v1 => github.com/fsnotify/fsnotify}/.gitignore (100%) create mode 100644 vendor/github.com/fsnotify/fsnotify/.travis.yml rename vendor/{gopkg.in/fsnotify.v1 => github.com/fsnotify/fsnotify}/AUTHORS (100%) rename vendor/{gopkg.in/fsnotify.v1 => github.com/fsnotify/fsnotify}/CHANGELOG.md (100%) rename vendor/{gopkg.in/fsnotify.v1 => github.com/fsnotify/fsnotify}/CONTRIBUTING.md (100%) rename vendor/{gopkg.in/fsnotify.v1 => github.com/fsnotify/fsnotify}/LICENSE (95%) rename vendor/{gopkg.in/fsnotify.v1 => github.com/fsnotify/fsnotify}/README.md (53%) rename vendor/{gopkg.in/fsnotify.v1 => github.com/fsnotify/fsnotify}/fen.go (100%) rename vendor/{gopkg.in/fsnotify.v1 => github.com/fsnotify/fsnotify}/fsnotify.go (95%) create mode 100644 vendor/github.com/fsnotify/fsnotify/go.mod create mode 100644 vendor/github.com/fsnotify/fsnotify/go.sum rename vendor/{gopkg.in/fsnotify.v1 => github.com/fsnotify/fsnotify}/inotify.go (100%) rename vendor/{gopkg.in/fsnotify.v1 => github.com/fsnotify/fsnotify}/inotify_poller.go (97%) rename vendor/{gopkg.in/fsnotify.v1 => github.com/fsnotify/fsnotify}/kqueue.go (100%) rename vendor/{gopkg.in/fsnotify.v1 => github.com/fsnotify/fsnotify}/open_mode_bsd.go (79%) rename vendor/{gopkg.in/fsnotify.v1 => github.com/fsnotify/fsnotify}/open_mode_darwin.go (84%) rename vendor/{gopkg.in/fsnotify.v1 => github.com/fsnotify/fsnotify}/windows.go (100%) create mode 100644 vendor/github.com/gardener/external-dns-management/LICENSE.md create mode 100644 vendor/github.com/gardener/external-dns-management/NOTICE.md create mode 100644 vendor/github.com/gardener/external-dns-management/pkg/apis/dns/register.go create mode 100644 vendor/github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1/dnsannotation.go create mode 100644 vendor/github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1/dnsentry.go create mode 100644 vendor/github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1/dnsowner.go create mode 100644 vendor/github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1/dnsprovider.go create mode 100644 vendor/github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1/doc.go create mode 100644 vendor/github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1/register.go create mode 100644 vendor/github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1/state.go create mode 100644 vendor/github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1/zz_generated.deepcopy.go create mode 100644 vendor/github.com/gardener/gardener-resource-manager/LICENSE.md create mode 100644 vendor/github.com/gardener/gardener-resource-manager/NOTICE.md create mode 100644 vendor/github.com/gardener/gardener-resource-manager/pkg/apis/resources/register.go create mode 100644 vendor/github.com/gardener/gardener-resource-manager/pkg/apis/resources/v1alpha1/doc.go create mode 100644 vendor/github.com/gardener/gardener-resource-manager/pkg/apis/resources/v1alpha1/register.go create mode 100644 
vendor/github.com/gardener/gardener-resource-manager/pkg/apis/resources/v1alpha1/types.go create mode 100644 vendor/github.com/gardener/gardener-resource-manager/pkg/apis/resources/v1alpha1/zz_generated.deepcopy.go rename vendor/github.com/gardener/gardener/hack/.ci/{ci.go => doc.go} (98%) create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/types.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/conversions.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/defaults.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/doc.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/generated.pb.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/generated.proto create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/register.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_backupbucket.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_backupentry.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_cloudprofile.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_common.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_controllerinstallation.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_controllerregistration.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_plant.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_project.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_quota.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_secretbinding.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_seed.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_shoot.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_shootstate.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_utils.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/zz_generated.conversion.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/zz_generated.deepcopy.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/zz_generated.defaults.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/seedmanagement/doc.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/seedmanagement/field_constants.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/seedmanagement/helper/helper.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/seedmanagement/register.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/seedmanagement/types_managedseed.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/seedmanagement/v1alpha1/conversions.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/seedmanagement/v1alpha1/defaults.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/seedmanagement/v1alpha1/doc.go create mode 
100644 vendor/github.com/gardener/gardener/pkg/apis/seedmanagement/v1alpha1/generated.pb.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/seedmanagement/v1alpha1/generated.proto create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/seedmanagement/v1alpha1/register.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/seedmanagement/v1alpha1/types_managedseed.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/seedmanagement/v1alpha1/zz_generated.conversion.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/seedmanagement/v1alpha1/zz_generated.deepcopy.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/seedmanagement/v1alpha1/zz_generated.defaults.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/seedmanagement/zz_generated.deepcopy.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/settings/doc.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/settings/register.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/settings/types_cluster_openidconnect_preset.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/settings/types_openidconnect_preset.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/settings/types_shared.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/settings/v1alpha1/defaults.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/settings/v1alpha1/doc.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/settings/v1alpha1/generated.pb.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/settings/v1alpha1/generated.proto create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/settings/v1alpha1/register.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/settings/v1alpha1/types_cluster_openidconnect_preset.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/settings/v1alpha1/types_openidconnect_preset.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/settings/v1alpha1/types_shared.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/settings/v1alpha1/zz_generated.conversion.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/settings/v1alpha1/zz_generated.deepcopy.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/settings/v1alpha1/zz_generated.defaults.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/settings/zz_generated.deepcopy.go create mode 100644 vendor/github.com/gardener/gardener/pkg/chartrenderer/default.go create mode 100644 vendor/github.com/gardener/gardener/pkg/chartrenderer/factory.go create mode 100644 vendor/github.com/gardener/gardener/pkg/chartrenderer/renderer.go create mode 100644 vendor/github.com/gardener/gardener/pkg/chartrenderer/sorter.go create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/clientset.go create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/doc.go create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/scheme/doc.go create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/scheme/register.go create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/backupbucket.go create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/backupentry.go create 
mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/cloudprofile.go create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/controllerinstallation.go create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/controllerregistration.go create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/core_client.go create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/doc.go create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/generated_expansion.go create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/plant.go create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/project.go create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/quota.go create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/secretbinding.go create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/seed.go create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/shoot.go create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/shootstate.go create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/backupbucket.go create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/backupentry.go create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/cloudprofile.go create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/controllerinstallation.go create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/controllerregistration.go create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/core_client.go create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/doc.go create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/generated_expansion.go create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/plant.go create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/project.go create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/quota.go create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/secretbinding.go create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/seed.go create mode 100644 vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/shoot.go create mode 100644 vendor/github.com/gardener/gardener/pkg/client/extensions/clientset/versioned/scheme/doc.go create mode 100644 
vendor/github.com/gardener/gardener/pkg/client/extensions/clientset/versioned/scheme/register.go create mode 100644 vendor/github.com/gardener/gardener/pkg/client/kubernetes/admissionplugins.go create mode 100644 vendor/github.com/gardener/gardener/pkg/client/kubernetes/applier.go create mode 100644 vendor/github.com/gardener/gardener/pkg/client/kubernetes/chartapplier.go create mode 100644 vendor/github.com/gardener/gardener/pkg/client/kubernetes/chartoptions.go create mode 100644 vendor/github.com/gardener/gardener/pkg/client/kubernetes/client.go create mode 100644 vendor/github.com/gardener/gardener/pkg/client/kubernetes/clientset.go create mode 100644 vendor/github.com/gardener/gardener/pkg/client/kubernetes/deployments.go create mode 100644 vendor/github.com/gardener/gardener/pkg/client/kubernetes/manifestoptions.go create mode 100644 vendor/github.com/gardener/gardener/pkg/client/kubernetes/options.go create mode 100644 vendor/github.com/gardener/gardener/pkg/client/kubernetes/pods.go create mode 100644 vendor/github.com/gardener/gardener/pkg/client/kubernetes/runtime_client.go create mode 100644 vendor/github.com/gardener/gardener/pkg/client/kubernetes/scaling.go create mode 100644 vendor/github.com/gardener/gardener/pkg/client/kubernetes/types.go create mode 100644 vendor/github.com/gardener/gardener/pkg/client/seedmanagement/clientset/versioned/clientset.go create mode 100644 vendor/github.com/gardener/gardener/pkg/client/seedmanagement/clientset/versioned/doc.go create mode 100644 vendor/github.com/gardener/gardener/pkg/client/seedmanagement/clientset/versioned/scheme/doc.go create mode 100644 vendor/github.com/gardener/gardener/pkg/client/seedmanagement/clientset/versioned/scheme/register.go create mode 100644 vendor/github.com/gardener/gardener/pkg/client/seedmanagement/clientset/versioned/typed/seedmanagement/v1alpha1/doc.go create mode 100644 vendor/github.com/gardener/gardener/pkg/client/seedmanagement/clientset/versioned/typed/seedmanagement/v1alpha1/generated_expansion.go create mode 100644 vendor/github.com/gardener/gardener/pkg/client/seedmanagement/clientset/versioned/typed/seedmanagement/v1alpha1/managedseed.go create mode 100644 vendor/github.com/gardener/gardener/pkg/client/seedmanagement/clientset/versioned/typed/seedmanagement/v1alpha1/seedmanagement_client.go create mode 100644 vendor/github.com/gardener/gardener/pkg/client/settings/clientset/versioned/scheme/doc.go create mode 100644 vendor/github.com/gardener/gardener/pkg/client/settings/clientset/versioned/scheme/register.go create mode 100644 vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/doc.go create mode 100644 vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/helper/helpers.go create mode 100644 vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/register.go create mode 100644 vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/types.go create mode 100644 vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/v1alpha1/defaults.go create mode 100644 vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/v1alpha1/doc.go create mode 100644 vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/v1alpha1/register.go create mode 100644 vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/v1alpha1/types.go create mode 100644 vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/v1alpha1/zz_generated.conversion.go create mode 100644 vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/v1alpha1/zz_generated.deepcopy.go 
create mode 100644 vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/v1alpha1/zz_generated.defaults.go create mode 100644 vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/zz_generated.deepcopy.go create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/checksums.go create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/kubernetes/health/and.go create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/labels.go create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/object.go create mode 100644 vendor/github.com/gardener/hvpa-controller/LICENSE.md create mode 100644 vendor/github.com/gardener/hvpa-controller/api/v1alpha1/groupversion_info.go create mode 100644 vendor/github.com/gardener/hvpa-controller/api/v1alpha1/hvpa_types.go create mode 100644 vendor/github.com/gardener/hvpa-controller/api/v1alpha1/hvpa_webhook.go create mode 100644 vendor/github.com/gardener/hvpa-controller/api/v1alpha1/zz_generated.deepcopy.go create mode 100644 vendor/github.com/go-logr/logr/discard.go create mode 100644 vendor/github.com/go-logr/logr/go.mod create mode 100644 vendor/github.com/go-logr/zapr/go.mod create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/Makefile create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/doc.go create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/gogo.proto create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/helper.go create mode 100644 vendor/github.com/gogo/protobuf/jsonpb/jsonpb.go create mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile create mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go create mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go create mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go create mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go create mode 100644 vendor/github.com/gogo/protobuf/types/any.go create mode 100644 vendor/github.com/gogo/protobuf/types/any.pb.go create mode 100644 vendor/github.com/gogo/protobuf/types/api.pb.go create mode 100644 vendor/github.com/gogo/protobuf/types/doc.go create mode 100644 vendor/github.com/gogo/protobuf/types/duration.go create mode 100644 vendor/github.com/gogo/protobuf/types/duration.pb.go create mode 100644 vendor/github.com/gogo/protobuf/types/duration_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/types/empty.pb.go create mode 100644 vendor/github.com/gogo/protobuf/types/field_mask.pb.go create mode 100644 vendor/github.com/gogo/protobuf/types/protosize.go create mode 100644 vendor/github.com/gogo/protobuf/types/source_context.pb.go create mode 100644 vendor/github.com/gogo/protobuf/types/struct.pb.go create mode 100644 vendor/github.com/gogo/protobuf/types/timestamp.go create mode 100644 vendor/github.com/gogo/protobuf/types/timestamp.pb.go create mode 100644 vendor/github.com/gogo/protobuf/types/timestamp_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/types/type.pb.go create mode 100644 vendor/github.com/gogo/protobuf/types/wrappers.pb.go create mode 100644 vendor/github.com/gogo/protobuf/types/wrappers_gogo.go create mode 100644 vendor/github.com/golang/protobuf/proto/buffer.go delete mode 100644 vendor/github.com/golang/protobuf/proto/clone.go delete mode 100644 
vendor/github.com/golang/protobuf/proto/decode.go create mode 100644 vendor/github.com/golang/protobuf/proto/defaults.go delete mode 100644 vendor/github.com/golang/protobuf/proto/encode.go delete mode 100644 vendor/github.com/golang/protobuf/proto/equal.go delete mode 100644 vendor/github.com/golang/protobuf/proto/lib.go delete mode 100644 vendor/github.com/golang/protobuf/proto/message_set.go delete mode 100644 vendor/github.com/golang/protobuf/proto/pointer_reflect.go delete mode 100644 vendor/github.com/golang/protobuf/proto/pointer_unsafe.go create mode 100644 vendor/github.com/golang/protobuf/proto/proto.go create mode 100644 vendor/github.com/golang/protobuf/proto/registry.go delete mode 100644 vendor/github.com/golang/protobuf/proto/table_marshal.go delete mode 100644 vendor/github.com/golang/protobuf/proto/table_merge.go delete mode 100644 vendor/github.com/golang/protobuf/proto/table_unmarshal.go delete mode 100644 vendor/github.com/golang/protobuf/proto/text.go create mode 100644 vendor/github.com/golang/protobuf/proto/text_decode.go create mode 100644 vendor/github.com/golang/protobuf/proto/text_encode.go delete mode 100644 vendor/github.com/golang/protobuf/proto/text_parser.go create mode 100644 vendor/github.com/golang/protobuf/proto/wire.go create mode 100644 vendor/github.com/golang/protobuf/proto/wrappers.go delete mode 100644 vendor/github.com/golang/protobuf/ptypes/any/any.proto delete mode 100644 vendor/github.com/golang/protobuf/ptypes/duration/duration.proto delete mode 100644 vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/value/name.go create mode 100644 vendor/github.com/google/go-cmp/cmp/report_references.go create mode 100644 vendor/github.com/googleapis/gnostic/OpenAPIv2/document.go delete mode 100644 vendor/github.com/googleapis/gnostic/compiler/extension-handler.go create mode 100644 vendor/github.com/googleapis/gnostic/compiler/extensions.go create mode 100644 vendor/github.com/googleapis/gnostic/jsonschema/README.md create mode 100644 vendor/github.com/googleapis/gnostic/jsonschema/base.go create mode 100644 vendor/github.com/googleapis/gnostic/jsonschema/display.go create mode 100644 vendor/github.com/googleapis/gnostic/jsonschema/models.go create mode 100644 vendor/github.com/googleapis/gnostic/jsonschema/operations.go create mode 100644 vendor/github.com/googleapis/gnostic/jsonschema/reader.go create mode 100644 vendor/github.com/googleapis/gnostic/jsonschema/schema.json create mode 100644 vendor/github.com/googleapis/gnostic/jsonschema/writer.go delete mode 100644 vendor/github.com/hpcloud/tail/.gitignore delete mode 100644 vendor/github.com/hpcloud/tail/CHANGES.md delete mode 100644 vendor/github.com/hpcloud/tail/Dockerfile delete mode 100644 vendor/github.com/hpcloud/tail/Makefile delete mode 100644 vendor/github.com/hpcloud/tail/README.md create mode 100644 vendor/github.com/imdario/mergo/.deepsource.toml create mode 100644 vendor/github.com/imdario/mergo/go.mod create mode 100644 vendor/github.com/imdario/mergo/go.sum create mode 100644 vendor/github.com/nxadm/tail/.gitignore rename vendor/github.com/{hpcloud => nxadm}/tail/.travis.yml (52%) create mode 100644 vendor/github.com/nxadm/tail/CHANGES.md create mode 100644 vendor/github.com/nxadm/tail/Dockerfile rename vendor/github.com/{hpcloud/tail/LICENSE.txt => nxadm/tail/LICENSE} (100%) create mode 100644 vendor/github.com/nxadm/tail/README.md rename vendor/github.com/{hpcloud => nxadm}/tail/appveyor.yml (77%) create 
mode 100644 vendor/github.com/nxadm/tail/go.mod create mode 100644 vendor/github.com/nxadm/tail/go.sum rename vendor/github.com/{hpcloud => nxadm}/tail/ratelimiter/Licence (100%) rename vendor/github.com/{hpcloud => nxadm}/tail/ratelimiter/leakybucket.go (100%) rename vendor/github.com/{hpcloud => nxadm}/tail/ratelimiter/memory.go (81%) rename vendor/github.com/{hpcloud => nxadm}/tail/ratelimiter/storage.go (100%) rename vendor/github.com/{hpcloud => nxadm}/tail/tail.go (85%) rename vendor/github.com/{hpcloud => nxadm}/tail/tail_posix.go (71%) rename vendor/github.com/{hpcloud => nxadm}/tail/tail_windows.go (81%) rename vendor/github.com/{hpcloud => nxadm}/tail/util/util.go (94%) rename vendor/github.com/{hpcloud => nxadm}/tail/watch/filechanges.go (93%) rename vendor/github.com/{hpcloud => nxadm}/tail/watch/inotify.go (88%) rename vendor/github.com/{hpcloud => nxadm}/tail/watch/inotify_tracker.go (86%) rename vendor/github.com/{hpcloud => nxadm}/tail/watch/polling.go (98%) rename vendor/github.com/{hpcloud => nxadm}/tail/watch/watch.go (100%) rename vendor/github.com/{hpcloud => nxadm}/tail/winfile/winfile.go (100%) create mode 100644 vendor/github.com/onsi/ginkgo/go.mod create mode 100644 vendor/github.com/onsi/ginkgo/go.sum create mode 100644 vendor/github.com/onsi/ginkgo/internal/global/init.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_darwin.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_dragonfly.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_freebsd.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_linux.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_linux_mips64le.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_netbsd.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_openbsd.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_solaris.go delete mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_linux_arm64.go delete mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_solaris.go delete mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_unix.go create mode 100644 vendor/github.com/onsi/gomega/matchers/contain_elements_matcher.go create mode 100644 vendor/github.com/onsi/gomega/matchers/have_http_status_matcher.go create mode 100644 vendor/github.com/prometheus/common/expfmt/openmetrics_create.go create mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_arm.go create mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_arm64.go create mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_default.go create mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_mips.go create mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_mips64.go create mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_mips64le.go create mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_mipsle.go create mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_ppc64.go create mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_ppc64le.go create mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_s390x.go create mode 100644 vendor/github.com/prometheus/procfs/fscache.go create mode 100644 vendor/github.com/prometheus/procfs/kernel_random.go create mode 100644 vendor/github.com/prometheus/procfs/loadavg.go create mode 100644 
vendor/github.com/prometheus/procfs/net_conntrackstat.go create mode 100644 vendor/github.com/prometheus/procfs/net_udp.go create mode 100644 vendor/github.com/prometheus/procfs/proc_cgroup.go create mode 100644 vendor/github.com/prometheus/procfs/proc_maps.go create mode 100644 vendor/github.com/prometheus/procfs/proc_smaps.go create mode 100644 vendor/github.com/prometheus/procfs/swaps.go create mode 100644 vendor/github.com/sirupsen/logrus/.golangci.yml create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_js.go delete mode 100644 vendor/go.uber.org/atomic/tools.go delete mode 100644 vendor/go.uber.org/multierr/tools.go delete mode 100644 vendor/go.uber.org/tools/LICENSE delete mode 100644 vendor/go.uber.org/tools/update-license/.gitignore delete mode 100644 vendor/go.uber.org/tools/update-license/README.md delete mode 100644 vendor/go.uber.org/tools/update-license/licenses.go delete mode 100644 vendor/go.uber.org/tools/update-license/main.go delete mode 100644 vendor/go.uber.org/zap/tools.go create mode 100644 vendor/go.uber.org/zap/zapcore/increase_level.go delete mode 100644 vendor/golang.org/x/lint/.travis.yml delete mode 100644 vendor/golang.org/x/lint/CONTRIBUTING.md delete mode 100644 vendor/golang.org/x/lint/README.md delete mode 100644 vendor/golang.org/x/lint/go.mod delete mode 100644 vendor/golang.org/x/lint/go.sum delete mode 100644 vendor/golang.org/x/lint/golint/golint.go delete mode 100644 vendor/golang.org/x/lint/golint/import.go delete mode 100644 vendor/golang.org/x/lint/golint/importcomment.go delete mode 100644 vendor/golang.org/x/lint/lint.go rename vendor/golang.org/x/net/idna/{tables12.00.go => tables12.0.0.go} (99%) create mode 100644 vendor/golang.org/x/net/idna/tables13.0.0.go create mode 100644 vendor/golang.org/x/sys/internal/unsafeheader/unsafeheader.go create mode 100644 vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s create mode 100644 vendor/golang.org/x/sys/unix/syscall_illumos.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_mips64.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_11.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_11.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_11.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go rename vendor/golang.org/x/sys/unix/{zsyscall_darwin_arm64.1_11.go => zsyscall_openbsd_mips64.go} (85%) create mode 100644 vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go create mode 100644 vendor/golang.org/x/sys/windows/setupapierrors_windows.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go create mode 100644 vendor/golang.org/x/text/unicode/norm/tables12.0.0.go delete mode 100644 vendor/golang.org/x/tools/LICENSE delete mode 100644 vendor/golang.org/x/tools/go/analysis/analysis.go delete mode 100644 vendor/golang.org/x/tools/go/analysis/diagnostic.go delete mode 100644 vendor/golang.org/x/tools/go/analysis/doc.go delete mode 100644 vendor/golang.org/x/tools/go/analysis/passes/inspect/inspect.go delete mode 100644 vendor/golang.org/x/tools/go/analysis/validate.go delete mode 100644 vendor/golang.org/x/tools/go/ast/astutil/enclosing.go delete mode 100644 
vendor/golang.org/x/tools/go/ast/astutil/imports.go delete mode 100644 vendor/golang.org/x/tools/go/ast/astutil/rewrite.go delete mode 100644 vendor/golang.org/x/tools/go/ast/astutil/util.go delete mode 100644 vendor/golang.org/x/tools/go/ast/inspector/inspector.go delete mode 100644 vendor/golang.org/x/tools/go/ast/inspector/typeof.go delete mode 100644 vendor/golang.org/x/tools/go/buildutil/allpackages.go delete mode 100644 vendor/golang.org/x/tools/go/buildutil/fakecontext.go delete mode 100644 vendor/golang.org/x/tools/go/buildutil/overlay.go delete mode 100644 vendor/golang.org/x/tools/go/buildutil/tags.go delete mode 100644 vendor/golang.org/x/tools/go/buildutil/util.go delete mode 100644 vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go delete mode 100644 vendor/golang.org/x/tools/go/gcexportdata/importer.go delete mode 100644 vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go delete mode 100644 vendor/golang.org/x/tools/go/internal/gcimporter/bimport.go delete mode 100644 vendor/golang.org/x/tools/go/internal/gcimporter/exportdata.go delete mode 100644 vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go delete mode 100644 vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go delete mode 100644 vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go delete mode 100644 vendor/golang.org/x/tools/go/internal/gcimporter/newInterface10.go delete mode 100644 vendor/golang.org/x/tools/go/internal/gcimporter/newInterface11.go delete mode 100644 vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go delete mode 100644 vendor/golang.org/x/tools/go/packages/doc.go delete mode 100644 vendor/golang.org/x/tools/go/packages/external.go delete mode 100644 vendor/golang.org/x/tools/go/packages/golist.go delete mode 100644 vendor/golang.org/x/tools/go/packages/golist_overlay.go delete mode 100644 vendor/golang.org/x/tools/go/packages/loadmode_string.go delete mode 100644 vendor/golang.org/x/tools/go/packages/packages.go delete mode 100644 vendor/golang.org/x/tools/go/packages/visit.go delete mode 100644 vendor/golang.org/x/tools/go/types/objectpath/objectpath.go delete mode 100644 vendor/golang.org/x/tools/go/types/typeutil/callee.go delete mode 100644 vendor/golang.org/x/tools/go/types/typeutil/imports.go delete mode 100644 vendor/golang.org/x/tools/go/types/typeutil/map.go delete mode 100644 vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go delete mode 100644 vendor/golang.org/x/tools/go/types/typeutil/ui.go delete mode 100644 vendor/golang.org/x/tools/internal/analysisinternal/analysis.go delete mode 100644 vendor/golang.org/x/tools/internal/gocommand/invoke.go delete mode 100644 vendor/golang.org/x/tools/internal/packagesinternal/packages.go delete mode 100644 vendor/golang.org/x/tools/internal/telemetry/event/event.go delete mode 100644 vendor/golang.org/x/tools/internal/telemetry/event/export.go delete mode 100644 vendor/golang.org/x/tools/internal/telemetry/event/key.go delete mode 100644 vendor/golang.org/x/tools/internal/telemetry/event/label.go delete mode 100644 vendor/golang.org/x/tools/internal/telemetry/event/log.go delete mode 100644 vendor/golang.org/x/tools/internal/telemetry/event/metric.go delete mode 100644 vendor/golang.org/x/tools/internal/telemetry/event/tag.go delete mode 100644 vendor/golang.org/x/tools/internal/telemetry/event/trace.go delete mode 100644 vendor/golang.org/x/xerrors/LICENSE delete mode 100644 vendor/golang.org/x/xerrors/PATENTS delete mode 100644 vendor/golang.org/x/xerrors/README delete mode 100644 
vendor/golang.org/x/xerrors/adaptor.go delete mode 100644 vendor/golang.org/x/xerrors/codereview.cfg delete mode 100644 vendor/golang.org/x/xerrors/doc.go delete mode 100644 vendor/golang.org/x/xerrors/errors.go delete mode 100644 vendor/golang.org/x/xerrors/fmt.go delete mode 100644 vendor/golang.org/x/xerrors/format.go delete mode 100644 vendor/golang.org/x/xerrors/frame.go delete mode 100644 vendor/golang.org/x/xerrors/go.mod delete mode 100644 vendor/golang.org/x/xerrors/wrap.go rename vendor/{golang.org/x/tools => google.golang.org/protobuf}/AUTHORS (74%) rename vendor/{golang.org/x/tools => google.golang.org/protobuf}/CONTRIBUTORS (70%) rename vendor/{golang.org/x/lint => google.golang.org/protobuf}/LICENSE (96%) rename vendor/{golang.org/x/tools => google.golang.org/protobuf}/PATENTS (100%) create mode 100644 vendor/google.golang.org/protobuf/encoding/prototext/decode.go rename vendor/{golang.org/x/sys/unix/syscall_darwin_386.1_11.go => google.golang.org/protobuf/encoding/prototext/doc.go} (52%) create mode 100644 vendor/google.golang.org/protobuf/encoding/prototext/encode.go create mode 100644 vendor/google.golang.org/protobuf/encoding/protowire/wire.go create mode 100644 vendor/google.golang.org/protobuf/internal/descfmt/stringer.go create mode 100644 vendor/google.golang.org/protobuf/internal/descopts/options.go create mode 100644 vendor/google.golang.org/protobuf/internal/detrand/rand.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/defval/default.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/decode.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/decode_string.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/decode_token.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/doc.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/encode.go create mode 100644 vendor/google.golang.org/protobuf/internal/errors/errors.go create mode 100644 vendor/google.golang.org/protobuf/internal/errors/is_go112.go create mode 100644 vendor/google.golang.org/protobuf/internal/errors/is_go113.go create mode 100644 vendor/google.golang.org/protobuf/internal/fieldnum/any_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/fieldnum/api_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/fieldnum/descriptor_gen.go rename vendor/{golang.org/x/sys/unix/syscall_darwin_amd64.1_11.go => google.golang.org/protobuf/internal/fieldnum/doc.go} (50%) create mode 100644 vendor/google.golang.org/protobuf/internal/fieldnum/duration_gen.go rename vendor/{golang.org/x/sys/unix/syscall_darwin_arm.1_11.go => google.golang.org/protobuf/internal/fieldnum/empty_gen.go} (54%) create mode 100644 vendor/google.golang.org/protobuf/internal/fieldnum/field_mask_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/fieldnum/source_context_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/fieldnum/struct_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/fieldnum/timestamp_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/fieldnum/type_gen.go create mode 100644 
vendor/google.golang.org/protobuf/internal/fieldnum/wrappers_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/fieldsort/fieldsort.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/build.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/desc.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go create mode 100644 vendor/google.golang.org/protobuf/internal/filetype/build.go create mode 100644 vendor/google.golang.org/protobuf/internal/flags/flags.go rename vendor/{golang.org/x/xerrors/internal/internal.go => google.golang.org/protobuf/internal/flags/proto_legacy_disable.go} (57%) create mode 100644 vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go create mode 100644 vendor/google.golang.org/protobuf/internal/genname/name.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/api_export.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/checkinit.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_extension.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_field.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_map.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go rename vendor/{golang.org/x/sys/unix/syscall_darwin_arm64.1_11.go => google.golang.org/protobuf/internal/impl/codec_map_go112.go} (53%) create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_message.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_tables.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/convert.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/convert_list.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/convert_map.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/decode.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/encode.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/enum.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/extension.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/legacy_export.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/legacy_file.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/legacy_message.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/merge.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/merge_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/message.go create mode 100644 
vendor/google.golang.org/protobuf/internal/impl/message_reflect.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/validate.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/weak.go create mode 100644 vendor/google.golang.org/protobuf/internal/mapsort/mapsort.go create mode 100644 vendor/google.golang.org/protobuf/internal/pragma/pragma.go create mode 100644 vendor/google.golang.org/protobuf/internal/set/ints.go create mode 100644 vendor/google.golang.org/protobuf/internal/strs/strings.go create mode 100644 vendor/google.golang.org/protobuf/internal/strs/strings_pure.go create mode 100644 vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go create mode 100644 vendor/google.golang.org/protobuf/internal/version/version.go create mode 100644 vendor/google.golang.org/protobuf/proto/checkinit.go create mode 100644 vendor/google.golang.org/protobuf/proto/decode.go create mode 100644 vendor/google.golang.org/protobuf/proto/decode_gen.go create mode 100644 vendor/google.golang.org/protobuf/proto/doc.go create mode 100644 vendor/google.golang.org/protobuf/proto/encode.go create mode 100644 vendor/google.golang.org/protobuf/proto/encode_gen.go create mode 100644 vendor/google.golang.org/protobuf/proto/equal.go create mode 100644 vendor/google.golang.org/protobuf/proto/extension.go create mode 100644 vendor/google.golang.org/protobuf/proto/merge.go create mode 100644 vendor/google.golang.org/protobuf/proto/messageset.go create mode 100644 vendor/google.golang.org/protobuf/proto/proto.go create mode 100644 vendor/google.golang.org/protobuf/proto/proto_methods.go create mode 100644 vendor/google.golang.org/protobuf/proto/proto_reflect.go create mode 100644 vendor/google.golang.org/protobuf/proto/reset.go create mode 100644 vendor/google.golang.org/protobuf/proto/size.go create mode 100644 vendor/google.golang.org/protobuf/proto/size_gen.go create mode 100644 vendor/google.golang.org/protobuf/proto/wrappers.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/source.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/type.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go create mode 100644 vendor/google.golang.org/protobuf/runtime/protoiface/legacy.go create mode 100644 vendor/google.golang.org/protobuf/runtime/protoiface/methods.go create mode 100644 vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go create mode 100644 vendor/google.golang.org/protobuf/runtime/protoimpl/version.go create mode 100644 vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go create mode 100644 
vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go create mode 100644 vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go delete mode 100644 vendor/gopkg.in/fsnotify.v1/.editorconfig delete mode 100644 vendor/gopkg.in/fsnotify.v1/.travis.yml create mode 100644 vendor/gopkg.in/yaml.v3/.travis.yml create mode 100644 vendor/gopkg.in/yaml.v3/LICENSE create mode 100644 vendor/gopkg.in/yaml.v3/NOTICE create mode 100644 vendor/gopkg.in/yaml.v3/README.md create mode 100644 vendor/gopkg.in/yaml.v3/apic.go create mode 100644 vendor/gopkg.in/yaml.v3/decode.go create mode 100644 vendor/gopkg.in/yaml.v3/emitterc.go create mode 100644 vendor/gopkg.in/yaml.v3/encode.go create mode 100644 vendor/gopkg.in/yaml.v3/go.mod create mode 100644 vendor/gopkg.in/yaml.v3/parserc.go create mode 100644 vendor/gopkg.in/yaml.v3/readerc.go create mode 100644 vendor/gopkg.in/yaml.v3/resolve.go create mode 100644 vendor/gopkg.in/yaml.v3/scannerc.go create mode 100644 vendor/gopkg.in/yaml.v3/sorter.go create mode 100644 vendor/gopkg.in/yaml.v3/writerc.go create mode 100644 vendor/gopkg.in/yaml.v3/yaml.go create mode 100644 vendor/gopkg.in/yaml.v3/yamlh.go create mode 100644 vendor/gopkg.in/yaml.v3/yamlprivateh.go delete mode 100644 vendor/honnef.co/go/tools/LICENSE delete mode 100644 vendor/honnef.co/go/tools/LICENSE-THIRD-PARTY delete mode 100644 vendor/honnef.co/go/tools/arg/arg.go delete mode 100644 vendor/honnef.co/go/tools/cmd/staticcheck/README.md delete mode 100644 vendor/honnef.co/go/tools/cmd/staticcheck/staticcheck.go delete mode 100644 vendor/honnef.co/go/tools/config/config.go delete mode 100644 vendor/honnef.co/go/tools/config/example.conf delete mode 100644 vendor/honnef.co/go/tools/deprecated/stdlib.go delete mode 100644 vendor/honnef.co/go/tools/facts/deprecated.go delete mode 100644 vendor/honnef.co/go/tools/facts/generated.go delete mode 100644 vendor/honnef.co/go/tools/facts/purity.go delete mode 100644 vendor/honnef.co/go/tools/facts/token.go delete mode 100644 vendor/honnef.co/go/tools/functions/loops.go delete mode 100644 vendor/honnef.co/go/tools/functions/pure.go delete mode 100644 vendor/honnef.co/go/tools/functions/terminates.go delete mode 100644 vendor/honnef.co/go/tools/go/types/typeutil/callee.go delete mode 100644 vendor/honnef.co/go/tools/go/types/typeutil/identical.go delete mode 100644 vendor/honnef.co/go/tools/go/types/typeutil/imports.go delete mode 100644 vendor/honnef.co/go/tools/go/types/typeutil/map.go delete mode 100644 vendor/honnef.co/go/tools/go/types/typeutil/methodsetcache.go delete mode 100644 vendor/honnef.co/go/tools/go/types/typeutil/ui.go delete mode 100644 vendor/honnef.co/go/tools/internal/cache/cache.go delete mode 100644 vendor/honnef.co/go/tools/internal/cache/default.go delete mode 100644 vendor/honnef.co/go/tools/internal/cache/hash.go delete mode 100644 vendor/honnef.co/go/tools/internal/passes/buildssa/buildssa.go delete mode 100644 vendor/honnef.co/go/tools/internal/renameio/renameio.go delete mode 100644 vendor/honnef.co/go/tools/internal/sharedcheck/lint.go delete mode 100644 vendor/honnef.co/go/tools/lint/LICENSE delete mode 100644 vendor/honnef.co/go/tools/lint/lint.go delete mode 100644 vendor/honnef.co/go/tools/lint/lintdsl/lintdsl.go delete mode 100644 vendor/honnef.co/go/tools/lint/lintutil/format/format.go delete mode 100644 vendor/honnef.co/go/tools/lint/lintutil/stats.go delete mode 100644 vendor/honnef.co/go/tools/lint/lintutil/stats_bsd.go delete mode 100644 
vendor/honnef.co/go/tools/lint/lintutil/stats_posix.go delete mode 100644 vendor/honnef.co/go/tools/lint/lintutil/util.go delete mode 100644 vendor/honnef.co/go/tools/lint/runner.go delete mode 100644 vendor/honnef.co/go/tools/lint/stats.go delete mode 100644 vendor/honnef.co/go/tools/loader/loader.go delete mode 100644 vendor/honnef.co/go/tools/printf/fuzz.go delete mode 100644 vendor/honnef.co/go/tools/printf/printf.go delete mode 100644 vendor/honnef.co/go/tools/simple/CONTRIBUTING.md delete mode 100644 vendor/honnef.co/go/tools/simple/analysis.go delete mode 100644 vendor/honnef.co/go/tools/simple/doc.go delete mode 100644 vendor/honnef.co/go/tools/simple/lint.go delete mode 100644 vendor/honnef.co/go/tools/ssa/LICENSE delete mode 100644 vendor/honnef.co/go/tools/ssa/blockopt.go delete mode 100644 vendor/honnef.co/go/tools/ssa/builder.go delete mode 100644 vendor/honnef.co/go/tools/ssa/const.go delete mode 100644 vendor/honnef.co/go/tools/ssa/create.go delete mode 100644 vendor/honnef.co/go/tools/ssa/doc.go delete mode 100644 vendor/honnef.co/go/tools/ssa/dom.go delete mode 100644 vendor/honnef.co/go/tools/ssa/emit.go delete mode 100644 vendor/honnef.co/go/tools/ssa/func.go delete mode 100644 vendor/honnef.co/go/tools/ssa/identical.go delete mode 100644 vendor/honnef.co/go/tools/ssa/identical_17.go delete mode 100644 vendor/honnef.co/go/tools/ssa/lift.go delete mode 100644 vendor/honnef.co/go/tools/ssa/lvalue.go delete mode 100644 vendor/honnef.co/go/tools/ssa/methods.go delete mode 100644 vendor/honnef.co/go/tools/ssa/mode.go delete mode 100644 vendor/honnef.co/go/tools/ssa/print.go delete mode 100644 vendor/honnef.co/go/tools/ssa/sanity.go delete mode 100644 vendor/honnef.co/go/tools/ssa/source.go delete mode 100644 vendor/honnef.co/go/tools/ssa/ssa.go delete mode 100644 vendor/honnef.co/go/tools/ssa/staticcheck.conf delete mode 100644 vendor/honnef.co/go/tools/ssa/testmain.go delete mode 100644 vendor/honnef.co/go/tools/ssa/util.go delete mode 100644 vendor/honnef.co/go/tools/ssa/wrappers.go delete mode 100644 vendor/honnef.co/go/tools/ssa/write.go delete mode 100644 vendor/honnef.co/go/tools/ssautil/ssautil.go delete mode 100644 vendor/honnef.co/go/tools/staticcheck/CONTRIBUTING.md delete mode 100644 vendor/honnef.co/go/tools/staticcheck/analysis.go delete mode 100644 vendor/honnef.co/go/tools/staticcheck/buildtag.go delete mode 100644 vendor/honnef.co/go/tools/staticcheck/doc.go delete mode 100644 vendor/honnef.co/go/tools/staticcheck/knowledge.go delete mode 100644 vendor/honnef.co/go/tools/staticcheck/lint.go delete mode 100644 vendor/honnef.co/go/tools/staticcheck/rules.go delete mode 100644 vendor/honnef.co/go/tools/staticcheck/structtag.go delete mode 100644 vendor/honnef.co/go/tools/staticcheck/vrp/channel.go delete mode 100644 vendor/honnef.co/go/tools/staticcheck/vrp/int.go delete mode 100644 vendor/honnef.co/go/tools/staticcheck/vrp/slice.go delete mode 100644 vendor/honnef.co/go/tools/staticcheck/vrp/string.go delete mode 100644 vendor/honnef.co/go/tools/staticcheck/vrp/vrp.go delete mode 100644 vendor/honnef.co/go/tools/stylecheck/analysis.go delete mode 100644 vendor/honnef.co/go/tools/stylecheck/doc.go delete mode 100644 vendor/honnef.co/go/tools/stylecheck/lint.go delete mode 100644 vendor/honnef.co/go/tools/stylecheck/names.go delete mode 100644 vendor/honnef.co/go/tools/unused/edge.go delete mode 100644 vendor/honnef.co/go/tools/unused/edgekind_string.go delete mode 100644 vendor/honnef.co/go/tools/unused/implements.go delete mode 100644 
vendor/honnef.co/go/tools/unused/unused.go delete mode 100644 vendor/honnef.co/go/tools/version/buildinfo.go delete mode 100644 vendor/honnef.co/go/tools/version/buildinfo111.go delete mode 100644 vendor/honnef.co/go/tools/version/version.go create mode 100644 vendor/istio.io/api/LICENSE create mode 100644 vendor/istio.io/api/analysis/v1alpha1/message.gen.json create mode 100644 vendor/istio.io/api/analysis/v1alpha1/message.pb.go create mode 100644 vendor/istio.io/api/analysis/v1alpha1/message.proto create mode 100644 vendor/istio.io/api/analysis/v1alpha1/message_deepcopy.gen.go create mode 100644 vendor/istio.io/api/analysis/v1alpha1/message_json.gen.go create mode 100644 vendor/istio.io/api/meta/v1alpha1/status.gen.json create mode 100644 vendor/istio.io/api/meta/v1alpha1/status.pb.go create mode 100644 vendor/istio.io/api/meta/v1alpha1/status.proto create mode 100644 vendor/istio.io/api/meta/v1alpha1/status_deepcopy.gen.go create mode 100644 vendor/istio.io/api/meta/v1alpha1/status_json.gen.go create mode 100644 vendor/istio.io/api/networking/v1beta1/destination_rule.gen.json create mode 100644 vendor/istio.io/api/networking/v1beta1/destination_rule.pb.go create mode 100644 vendor/istio.io/api/networking/v1beta1/destination_rule.proto create mode 100644 vendor/istio.io/api/networking/v1beta1/destination_rule_deepcopy.gen.go create mode 100644 vendor/istio.io/api/networking/v1beta1/destination_rule_json.gen.go create mode 100644 vendor/istio.io/api/networking/v1beta1/gateway.gen.json create mode 100644 vendor/istio.io/api/networking/v1beta1/gateway.pb.go create mode 100644 vendor/istio.io/api/networking/v1beta1/gateway.proto create mode 100644 vendor/istio.io/api/networking/v1beta1/gateway_deepcopy.gen.go create mode 100644 vendor/istio.io/api/networking/v1beta1/gateway_json.gen.go create mode 100644 vendor/istio.io/api/networking/v1beta1/service_entry.gen.json create mode 100644 vendor/istio.io/api/networking/v1beta1/service_entry.pb.go create mode 100644 vendor/istio.io/api/networking/v1beta1/service_entry.proto create mode 100644 vendor/istio.io/api/networking/v1beta1/service_entry_deepcopy.gen.go create mode 100644 vendor/istio.io/api/networking/v1beta1/service_entry_json.gen.go create mode 100644 vendor/istio.io/api/networking/v1beta1/sidecar.gen.json create mode 100644 vendor/istio.io/api/networking/v1beta1/sidecar.pb.go create mode 100644 vendor/istio.io/api/networking/v1beta1/sidecar.proto create mode 100644 vendor/istio.io/api/networking/v1beta1/sidecar_deepcopy.gen.go create mode 100644 vendor/istio.io/api/networking/v1beta1/sidecar_json.gen.go create mode 100644 vendor/istio.io/api/networking/v1beta1/virtual_service.gen.json create mode 100644 vendor/istio.io/api/networking/v1beta1/virtual_service.pb.go create mode 100644 vendor/istio.io/api/networking/v1beta1/virtual_service.proto create mode 100644 vendor/istio.io/api/networking/v1beta1/virtual_service_deepcopy.gen.go create mode 100644 vendor/istio.io/api/networking/v1beta1/virtual_service_json.gen.go create mode 100644 vendor/istio.io/api/networking/v1beta1/workload_entry.gen.json create mode 100644 vendor/istio.io/api/networking/v1beta1/workload_entry.pb.go create mode 100644 vendor/istio.io/api/networking/v1beta1/workload_entry.proto create mode 100644 vendor/istio.io/api/networking/v1beta1/workload_entry_deepcopy.gen.go create mode 100644 vendor/istio.io/api/networking/v1beta1/workload_entry_json.gen.go create mode 100644 vendor/istio.io/client-go/LICENSE create mode 100644 
vendor/istio.io/client-go/pkg/apis/networking/v1beta1/doc.go create mode 100644 vendor/istio.io/client-go/pkg/apis/networking/v1beta1/register.gen.go create mode 100644 vendor/istio.io/client-go/pkg/apis/networking/v1beta1/types.gen.go create mode 100644 vendor/istio.io/client-go/pkg/apis/networking/v1beta1/zz_generated.deepcopy.gen.go create mode 100644 vendor/istio.io/gogo-genproto/LICENSE create mode 100644 vendor/istio.io/gogo-genproto/googleapis/google/api/annotations.pb.go create mode 100644 vendor/istio.io/gogo-genproto/googleapis/google/api/http.pb.go create mode 100644 vendor/k8s.io/api/admission/v1/doc.go create mode 100644 vendor/k8s.io/api/admission/v1/generated.pb.go create mode 100644 vendor/k8s.io/api/admission/v1/generated.proto create mode 100644 vendor/k8s.io/api/admission/v1/register.go create mode 100644 vendor/k8s.io/api/admission/v1/types.go create mode 100644 vendor/k8s.io/api/admission/v1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/admission/v1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/admission/v1beta1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/apps/v1beta1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/apps/v1beta2/zz_generated.prerelease-lifecycle.go delete mode 100644 vendor/k8s.io/api/auditregistration/v1alpha1/generated.proto delete mode 100644 vendor/k8s.io/api/auditregistration/v1alpha1/types.go delete mode 100644 vendor/k8s.io/api/auditregistration/v1alpha1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/authentication/v1beta1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/authorization/v1beta1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/batch/v1beta1/zz_generated.prerelease-lifecycle.go rename vendor/k8s.io/api/{auditregistration/v1alpha1 => certificates/v1}/doc.go (80%) create mode 100644 vendor/k8s.io/api/certificates/v1/generated.pb.go create mode 100644 vendor/k8s.io/api/certificates/v1/generated.proto create mode 100644 vendor/k8s.io/api/certificates/v1/register.go create mode 100644 vendor/k8s.io/api/certificates/v1/types.go create mode 100644 vendor/k8s.io/api/certificates/v1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/certificates/v1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/certificates/v1beta1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/coordination/v1beta1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/core/v1/lifecycle.go create mode 100644 vendor/k8s.io/api/core/v1/well_known_taints.go create mode 100644 vendor/k8s.io/api/discovery/v1beta1/doc.go create mode 100644 vendor/k8s.io/api/discovery/v1beta1/generated.pb.go create mode 100644 vendor/k8s.io/api/discovery/v1beta1/generated.proto create mode 100644 vendor/k8s.io/api/discovery/v1beta1/register.go create mode 100644 vendor/k8s.io/api/discovery/v1beta1/types.go create mode 100644 vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/discovery/v1beta1/well_known_labels.go create mode 100644 vendor/k8s.io/api/discovery/v1beta1/zz_generated.deepcopy.go create mode 100644 
vendor/k8s.io/api/discovery/v1beta1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/events/v1/doc.go create mode 100644 vendor/k8s.io/api/events/v1/generated.pb.go create mode 100644 vendor/k8s.io/api/events/v1/generated.proto create mode 100644 vendor/k8s.io/api/events/v1/register.go create mode 100644 vendor/k8s.io/api/events/v1/types.go create mode 100644 vendor/k8s.io/api/events/v1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/events/v1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/events/v1beta1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/extensions/v1beta1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/flowcontrol/v1alpha1/doc.go create mode 100644 vendor/k8s.io/api/flowcontrol/v1alpha1/generated.pb.go create mode 100644 vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto create mode 100644 vendor/k8s.io/api/flowcontrol/v1alpha1/register.go create mode 100644 vendor/k8s.io/api/flowcontrol/v1alpha1/types.go create mode 100644 vendor/k8s.io/api/flowcontrol/v1alpha1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/networking/v1beta1/well_known_annotations.go create mode 100644 vendor/k8s.io/api/networking/v1beta1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/node/v1beta1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/policy/v1beta1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/rbac/v1beta1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/scheduling/v1beta1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/storage/v1beta1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/.import-restrictions create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/.import-restrictions create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/conditions.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme/doc.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme/register.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/negotiate.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/cache/cache.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/cache/expiring.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/httpstream/doc.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/connection.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/upgrade.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/remotecommand/constants.go create mode 100644 vendor/k8s.io/apimachinery/third_party/forked/golang/netutil/addr.go create mode 100644 vendor/k8s.io/autoscaler/LICENSE create mode 100644 vendor/k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta2/doc.go rename vendor/k8s.io/{api/auditregistration/v1alpha1 => 
autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta2}/register.go (74%) create mode 100644 vendor/k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta2/types.go create mode 100644 vendor/k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta2/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditsink.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview_expansion.go rename vendor/k8s.io/client-go/kubernetes/typed/{auditregistration/v1alpha1/auditregistration_client.go => certificates/v1/certificates_client.go} (58%) create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/certificatesigningrequest.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/discovery_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/endpointslice.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/events/v1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/events/v1/event.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/events/v1/events_client.go rename vendor/k8s.io/client-go/kubernetes/typed/{auditregistration/v1alpha1 => events/v1}/generated_expansion.go (92%) rename vendor/k8s.io/client-go/kubernetes/typed/{auditregistration => flowcontrol}/v1alpha1/doc.go (100%) create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/flowcontrol_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/flowschema.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/prioritylevelconfiguration.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1/ingress.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1/ingressclass.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingressclass.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csidriver.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csinode.go create mode 100644 
vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/csistoragecapacity.go create mode 100644 vendor/k8s.io/client-go/metadata/interface.go create mode 100644 vendor/k8s.io/client-go/metadata/metadata.go create mode 100644 vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/metrics.go create mode 100644 vendor/k8s.io/client-go/rest/warnings.go create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.conversion.go create mode 100644 vendor/k8s.io/client-go/tools/leaderelection/resourcelock/multilock.go create mode 100644 vendor/k8s.io/client-go/tools/portforward/doc.go create mode 100644 vendor/k8s.io/client-go/tools/portforward/portforward.go create mode 100644 vendor/k8s.io/client-go/tools/remotecommand/doc.go create mode 100644 vendor/k8s.io/client-go/tools/remotecommand/errorstream.go create mode 100644 vendor/k8s.io/client-go/tools/remotecommand/reader.go create mode 100644 vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go rename vendor/k8s.io/client-go/{kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview_expansion.go => tools/remotecommand/resize.go} (50%) create mode 100644 vendor/k8s.io/client-go/tools/remotecommand/v1.go create mode 100644 vendor/k8s.io/client-go/tools/remotecommand/v2.go create mode 100644 vendor/k8s.io/client-go/tools/remotecommand/v3.go create mode 100644 vendor/k8s.io/client-go/tools/remotecommand/v4.go create mode 100644 vendor/k8s.io/client-go/transport/cert_rotation.go create mode 100644 vendor/k8s.io/client-go/transport/spdy/spdy.go create mode 100644 vendor/k8s.io/client-go/util/cert/server_inspection.go create mode 100644 vendor/k8s.io/client-go/util/exec/exec.go create mode 100644 vendor/k8s.io/component-base/LICENSE create mode 100644 vendor/k8s.io/component-base/config/OWNERS rename vendor/{sigs.k8s.io/controller-runtime/pkg/manager/testutil.go => k8s.io/component-base/config/doc.go} (64%) create mode 100644 vendor/k8s.io/component-base/config/types.go create mode 100644 vendor/k8s.io/component-base/config/v1alpha1/conversion.go create mode 100644 vendor/k8s.io/component-base/config/v1alpha1/defaults.go create mode 100644 vendor/k8s.io/component-base/config/v1alpha1/doc.go create mode 100644 vendor/k8s.io/component-base/config/v1alpha1/register.go create mode 100644 vendor/k8s.io/component-base/config/v1alpha1/types.go create mode 100644 vendor/k8s.io/component-base/config/v1alpha1/zz_generated.conversion.go create mode 100644 vendor/k8s.io/component-base/config/v1alpha1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/component-base/config/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/klog/v2/.gitignore create mode 100644 vendor/k8s.io/klog/v2/CONTRIBUTING.md create mode 100644 vendor/k8s.io/klog/v2/LICENSE create mode 100644 vendor/k8s.io/klog/v2/OWNERS create mode 100644 vendor/k8s.io/klog/v2/README.md create mode 100644 vendor/k8s.io/klog/v2/RELEASE.md create mode 100644 vendor/k8s.io/klog/v2/SECURITY_CONTACTS create mode 100644 vendor/k8s.io/klog/v2/code-of-conduct.md create mode 100644 vendor/k8s.io/klog/v2/go.mod create mode 100644 vendor/k8s.io/klog/v2/go.sum create mode 100644 vendor/k8s.io/klog/v2/klog.go create mode 100644 vendor/k8s.io/klog/v2/klog_file.go create mode 100644 vendor/k8s.io/kube-aggregator/LICENSE rename vendor/k8s.io/{client-go/kubernetes/typed/authentication/v1beta1/tokenreview_expansion.go => kube-aggregator/pkg/apis/apiregistration/doc.go} (53%) create mode 100644 vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/helpers.go create mode 100644 
vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/register.go create mode 100644 vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/types.go rename vendor/k8s.io/{client-go/kubernetes/typed/authentication/v1/tokenreview_expansion.go => kube-aggregator/pkg/apis/apiregistration/v1/defaults.go} (52%) create mode 100644 vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/doc.go rename vendor/k8s.io/{api/auditregistration/v1alpha1 => kube-aggregator/pkg/apis/apiregistration/v1}/generated.pb.go (56%) create mode 100644 vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/generated.proto create mode 100644 vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/register.go create mode 100644 vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/types.go create mode 100644 vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/zz_generated.conversion.go create mode 100644 vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/zz_generated.defaults.go create mode 100644 vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/defaults.go create mode 100644 vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/doc.go create mode 100644 vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/generated.pb.go create mode 100644 vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/generated.proto create mode 100644 vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/register.go create mode 100644 vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/types.go create mode 100644 vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/zz_generated.conversion.go create mode 100644 vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/zz_generated.defaults.go create mode 100644 vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/zz_generated.prerelease-lifecycle.go rename vendor/k8s.io/{api/auditregistration/v1alpha1 => kube-aggregator/pkg/apis/apiregistration}/zz_generated.deepcopy.go (62%) create mode 100644 vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/clientset.go create mode 100644 vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/doc.go create mode 100644 vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme/doc.go create mode 100644 vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme/register.go create mode 100644 vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1/apiregistration_client.go create mode 100644 vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1/apiservice.go create mode 100644 vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1/doc.go create mode 100644 vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1/generated_expansion.go create mode 100644 vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1beta1/apiregistration_client.go create mode 100644 vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1beta1/apiservice.go create mode 100644 
vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1beta1/doc.go create mode 100644 vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1beta1/generated_expansion.go create mode 100644 vendor/k8s.io/utils/trace/README.md create mode 100644 vendor/sigs.k8s.io/controller-runtime/Makefile create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/builder/options.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/codec.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/dryrun.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/metadata_client.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/object.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/config/config.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/config/doc.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/doc.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/register.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/types.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/zz_generated.deepcopy.go delete mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/envtest/ginkgo.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/envtest/printer/prow.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/envtest/webhook.go rename vendor/sigs.k8s.io/{testing_frameworks => controller-runtime/pkg/internal/testing}/integration/.gitignore (100%) create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/integration/README.md create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/integration/addr/manager.go rename vendor/sigs.k8s.io/{testing_frameworks => controller-runtime/pkg/internal/testing}/integration/apiserver.go (77%) rename vendor/sigs.k8s.io/{testing_frameworks => controller-runtime/pkg/internal/testing}/integration/control_plane.go (56%) rename vendor/sigs.k8s.io/{testing_frameworks => controller-runtime/pkg/internal/testing}/integration/doc.go (89%) rename vendor/sigs.k8s.io/{testing_frameworks => controller-runtime/pkg/internal/testing}/integration/etcd.go (93%) create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/integration/internal/apiserver.go rename vendor/sigs.k8s.io/{testing_frameworks => controller-runtime/pkg/internal/testing}/integration/internal/arguments.go (87%) rename vendor/sigs.k8s.io/{testing_frameworks => controller-runtime/pkg/internal/testing}/integration/internal/bin_path_finder.go (100%) rename vendor/sigs.k8s.io/{testing_frameworks => controller-runtime/pkg/internal/testing}/integration/internal/etcd.go (58%) rename vendor/sigs.k8s.io/{testing_frameworks => controller-runtime/pkg/internal/testing}/integration/internal/process.go (85%) create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/integration/internal/tinyca.go rename vendor/sigs.k8s.io/{testing_frameworks => controller-runtime/pkg/internal/testing}/integration/kubectl.go (94%) create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/log/zap/flags.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/manager/client_builder.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/ratelimiter/doc.go create mode 100644 
vendor/sigs.k8s.io/controller-runtime/pkg/ratelimiter/ratelimiter.go rename vendor/sigs.k8s.io/{testing_frameworks => structured-merge-diff/v4}/LICENSE (99%) create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/value/allocator.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/value/doc.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/value/fields.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/value/jsontagutil.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/value/list.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/value/listreflect.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/value/listunstructured.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/value/map.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/value/mapreflect.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/value/mapunstructured.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/value/reflectcache.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/value/scalar.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/value/structreflect.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/value/value.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/value/valuereflect.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/value/valueunstructured.go delete mode 100644 vendor/sigs.k8s.io/testing_frameworks/integration/README.md delete mode 100644 vendor/sigs.k8s.io/testing_frameworks/integration/addr/manager.go delete mode 100644 vendor/sigs.k8s.io/testing_frameworks/integration/internal/apiserver.go create mode 100644 vendor/sigs.k8s.io/yaml/go.mod create mode 100644 vendor/sigs.k8s.io/yaml/go.sum diff --git a/.gitignore b/.gitignore index fc52ed42a..1e4c4fd26 100644 --- a/.gitignore +++ b/.gitignore @@ -28,3 +28,5 @@ config.vmdk .virtualgo .fuse_hidden* +.cache_ggshield + diff --git a/api/v1alpha1/suite_test.go b/api/v1alpha1/suite_test.go index befef79f6..d5355c5be 100644 --- a/api/v1alpha1/suite_test.go +++ b/api/v1alpha1/suite_test.go @@ -27,6 +27,7 @@ import ( "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/envtest" + "sigs.k8s.io/controller-runtime/pkg/envtest/printer" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/log/zap" ) @@ -34,20 +35,22 @@ import ( // These tests use Ginkgo (BDD-style Go testing framework). Refer to // http://onsi.github.io/ginkgo/ to learn more about Ginkgo. 
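For orientation, the api/v1alpha1/suite_test.go hunks around this point track two controller-runtime v0.7 API moves: the NewlineReporter now lives in the envtest/printer package, and the removed zap.LoggerTo helper is replaced by zap.New with functional options. A minimal, self-contained sketch of that bootstrap under those assumptions (package, function, and suite names are illustrative, not the repository's):

package v1alpha1_test

import (
    "testing"

    . "github.com/onsi/ginkgo"
    . "github.com/onsi/gomega"
    "sigs.k8s.io/controller-runtime/pkg/envtest/printer"
    logf "sigs.k8s.io/controller-runtime/pkg/log"
    "sigs.k8s.io/controller-runtime/pkg/log/zap"
)

func TestAPIsSketch(t *testing.T) {
    RegisterFailHandler(Fail)

    // zap.New replaces the removed zap.LoggerTo(GinkgoWriter, true) helper;
    // UseDevMode(true) keeps the human-readable, unsampled output tests expect.
    logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)))

    // NewlineReporter moved from the envtest package to envtest/printer.
    RunSpecsWithDefaultAndCustomReporters(t, "Sketch Suite",
        []Reporter{printer.NewlineReporter{}})
}
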
-var cfg *rest.Config -var k8sClient client.Client -var testEnv *envtest.Environment +var ( + cfg *rest.Config + k8sClient client.Client + testEnv *envtest.Environment +) func TestAPIs(t *testing.T) { RegisterFailHandler(Fail) RunSpecsWithDefaultAndCustomReporters(t, "v1 Suite", - []Reporter{envtest.NewlineReporter{}}) + []Reporter{printer.NewlineReporter{}}) } var _ = BeforeSuite(func(done Done) { - logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) By("bootstrapping test environment") testEnv = &envtest.Environment{ diff --git a/config/crd/bases/druid.gardener.cloud_etcds.yaml b/config/crd/bases/druid.gardener.cloud_etcds.yaml index 07d8eb2b1..88f293f14 100644 --- a/config/crd/bases/druid.gardener.cloud_etcds.yaml +++ b/config/crd/bases/druid.gardener.cloud_etcds.yaml @@ -4,7 +4,7 @@ apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.2.4 + controller-gen.kubebuilder.io/version: v0.2.5 creationTimestamp: null name: etcds.druid.gardener.cloud spec: @@ -56,9 +56,13 @@ spec: delta snapshots of etcd properties: deltaSnapshotMemoryLimit: + anyOf: + - type: integer + - type: string description: DeltaSnapshotMemoryLimit defines the memory limit after which delta snapshots will be taken - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true deltaSnapshotPeriod: description: DeltaSnapshotPeriod defines the period after which delta snapshots will be taken @@ -92,13 +96,21 @@ spec: properties: limits: additionalProperties: - type: string + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' type: object requests: additionalProperties: - type: string + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise @@ -215,21 +227,33 @@ spec: - extensive type: string quota: + anyOf: + - type: integer + - type: string description: Quota defines the etcd DB quota. - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true resources: description: 'Resources defines the compute Resources required by etcd container. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' properties: limits: additionalProperties: - type: string + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true description: 'Limits describes the maximum amount of compute resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' type: object requests: additionalProperties: - type: string + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise @@ -343,8 +367,12 @@ spec: type: object type: object storageCapacity: + anyOf: + - type: integer + - type: string description: StorageCapacity defines the size of persistent volume. - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true storageClass: description: 'StorageClass defines the name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' diff --git a/config/rbac/leader_election_role.yaml b/config/rbac/leader_election_role.yaml index 85093a8c2..7aee2660f 100644 --- a/config/rbac/leader_election_role.yaml +++ b/config/rbac/leader_election_role.yaml @@ -8,6 +8,7 @@ rules: - "" resources: - configmaps + - leases verbs: - get - list @@ -16,11 +17,3 @@ rules: - update - patch - delete -- apiGroups: - - "" - resources: - - configmaps/status - verbs: - - get - - update - - patch diff --git a/controllers/controller_ref_manager.go b/controllers/controller_ref_manager.go index cde57606d..66a4c1480 100644 --- a/controllers/controller_ref_manager.go +++ b/controllers/controller_ref_manager.go @@ -35,7 +35,6 @@ import ( "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" utilerrors "k8s.io/apimachinery/pkg/util/errors" @@ -78,7 +77,7 @@ func (m *BaseControllerRefManager) CanAdopt() error { // own the object. // // No reconciliation will be attempted if the controller is being deleted. -func (m *BaseControllerRefManager) claimObject(obj metav1.Object, match func(metav1.Object) bool, adopt, release func(metav1.Object) error) (bool, error) { +func (m *BaseControllerRefManager) claimObject(obj client.Object, match func(metav1.Object) bool, adopt, release func(client.Object) error) (bool, error) { controllerRef := metav1.GetControllerOf(obj) if controllerRef != nil { if controllerRef.UID != m.Controller.GetUID() { @@ -329,12 +328,12 @@ func (m *EtcdDruidRefManager) ClaimConfigMaps(cms *corev1.ConfigMapList, filters // AdoptResource sends a patch to take control of the Etcd. It returns the error if // the patching fails. 
-func (m *EtcdDruidRefManager) AdoptResource(obj metav1.Object) error { +func (m *EtcdDruidRefManager) AdoptResource(obj client.Object) error { if err := m.CanAdopt(); err != nil { return fmt.Errorf("can't adopt resource %v/%v (%v): %v", obj.GetNamespace(), obj.GetName(), obj.GetUID(), err) } - var clone metav1.Object + var clone client.Object switch objType := obj.(type) { case *appsv1.StatefulSet: clone = obj.(*appsv1.StatefulSet).DeepCopy() @@ -364,13 +363,13 @@ func (m *EtcdDruidRefManager) AdoptResource(obj metav1.Object) error { return fmt.Errorf("cannot adopt resource: %s", objType) } - return m.reconciler.Patch(context.TODO(), clone.(runtime.Object), client.MergeFrom(obj.(runtime.Object))) + return m.reconciler.Patch(context.TODO(), clone, client.MergeFrom(obj)) } // ReleaseResource sends a patch to free the resource from the control of the controller. // It returns the error if the patching fails. 404 and 422 errors are ignored. -func (m *EtcdDruidRefManager) ReleaseResource(obj metav1.Object) error { - var clone metav1.Object +func (m *EtcdDruidRefManager) ReleaseResource(obj client.Object) error { + var clone client.Object switch objType := obj.(type) { case *appsv1.StatefulSet: clone = obj.(*appsv1.StatefulSet).DeepCopy() @@ -389,7 +388,7 @@ func (m *EtcdDruidRefManager) ReleaseResource(obj metav1.Object) error { m.disown(clone) - err := client.IgnoreNotFound(m.reconciler.Patch(context.TODO(), clone.(runtime.Object), client.MergeFrom(obj.(runtime.Object)))) + err := client.IgnoreNotFound(m.reconciler.Patch(context.TODO(), clone, client.MergeFrom(obj))) if errors.IsInvalid(err) { // Invalid error will be returned in two cases: 1. the etcd // has no owner reference, 2. the uid of the etcd doesn't diff --git a/controllers/etcd_controller.go b/controllers/etcd_controller.go index e8b01d453..e8d851f61 100644 --- a/controllers/etcd_controller.go +++ b/controllers/etcd_controller.go @@ -170,8 +170,7 @@ func (r *EtcdReconciler) InitializeControllerWithImageVector() (*EtcdReconciler, // +kubebuilder:rbac:groups=druid.gardener.cloud,resources=etcds/status,verbs=get;update;patch // Reconcile reconciles the etcd. -func (r *EtcdReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { - ctx := context.TODO() +func (r *EtcdReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { etcd := &druidv1alpha1.Etcd{} if err := r.Get(ctx, req.NamespacedName, etcd); err != nil { if errors.IsNotFound(err) { diff --git a/controllers/suite_test.go b/controllers/suite_test.go index 5867adaa4..05150c62c 100644 --- a/controllers/suite_test.go +++ b/controllers/suite_test.go @@ -15,6 +15,7 @@ package controllers import ( + "context" "path/filepath" "sync" "testing" @@ -32,25 +33,27 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/envtest" + "sigs.k8s.io/controller-runtime/pkg/envtest/printer" "sigs.k8s.io/controller-runtime/pkg/log/zap" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/reconcile" // +kubebuilder:scaffold:imports ) // These tests use Ginkgo (BDD-style Go testing framework). Refer to // http://onsi.github.io/ginkgo/ to learn more about Ginkgo. 
-var cfg *rest.Config -var k8sClient client.Client -var testEnv *envtest.Environment -var mgr manager.Manager -var recFn reconcile.Reconciler -var requests chan reconcile.Request -var stopMgr chan struct{} -var mgrStopped *sync.WaitGroup var ( + mgrCtx context.Context + mgrCancel context.CancelFunc + cfg *rest.Config + k8sClient client.Client + testEnv *envtest.Environment + mgr manager.Manager + recFn reconcile.Reconciler + requests chan reconcile.Request + mgrStopped *sync.WaitGroup + testLog = ctrl.Log.WithName("test") ) @@ -59,13 +62,14 @@ func TestAPIs(t *testing.T) { RunSpecsWithDefaultAndCustomReporters(t, "Controller Suite", - []Reporter{envtest.NewlineReporter{}}) + []Reporter{printer.NewlineReporter{}}) } var _ = BeforeSuite(func(done Done) { + mgrCtx, mgrCancel = context.WithCancel(context.Background()) var err error //logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) - ctrl.SetLogger(zap.Logger(true)) + ctrl.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) By("bootstrapping test environment") testEnv = &envtest.Environment{ CRDDirectoryPaths: []string{filepath.Join("..", "config", "crd", "bases")}, @@ -102,26 +106,25 @@ var _ = BeforeSuite(func(done Done) { err = er.SetupWithManager(mgr, 1, true) Expect(err).NotTo(HaveOccurred()) - stopMgr, mgrStopped = startTestManager(mgr) + mgrStopped = startTestManager(mgrCtx, mgr) close(done) }, 60) var _ = AfterSuite(func() { - close(stopMgr) + mgrCancel() mgrStopped.Wait() Expect(testEnv.Stop()).To(Succeed()) }) -func startTestManager(mgr manager.Manager) (chan struct{}, *sync.WaitGroup) { - stop := make(chan struct{}) +func startTestManager(ctx context.Context, mgr manager.Manager) *sync.WaitGroup { wg := &sync.WaitGroup{} go func() { wg.Add(1) - Expect(mgr.Start(stop)).NotTo(HaveOccurred()) + Expect(mgr.Start(ctx)).NotTo(HaveOccurred()) wg.Done() }() - return stop, wg + return wg } func SetupWithManager(mgr ctrl.Manager, r reconcile.Reconciler) error { diff --git a/go.mod b/go.mod index 3bf15e666..db1ae96fb 100644 --- a/go.mod +++ b/go.mod @@ -1,29 +1,28 @@ module github.com/gardener/etcd-druid -go 1.13 +go 1.15 require ( - github.com/gardener/gardener v1.3.1 + github.com/gardener/gardener v1.17.1 github.com/ghodss/yaml v1.0.0 - github.com/huandu/xstrings v1.3.1 // indirect - github.com/onsi/ginkgo v1.10.1 - github.com/onsi/gomega v1.7.0 - github.com/sirupsen/logrus v1.4.2 - k8s.io/api v0.17.0 - k8s.io/apimachinery v0.17.0 + github.com/onsi/ginkgo v1.14.1 + github.com/onsi/gomega v1.10.5 + github.com/sirupsen/logrus v1.6.0 + k8s.io/api v0.19.6 + k8s.io/apimachinery v0.19.6 k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible k8s.io/helm v2.16.1+incompatible - k8s.io/utils v0.0.0-20200327001022-6496210b90e8 - sigs.k8s.io/controller-runtime v0.4.0 + k8s.io/utils v0.0.0-20200912215256-4140de9c8800 + sigs.k8s.io/controller-runtime v0.7.1 ) replace ( - k8s.io/api => k8s.io/api v0.16.8 - k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.16.8 - k8s.io/apimachinery => k8s.io/apimachinery v0.16.8 - k8s.io/client-go => k8s.io/client-go v0.16.8 - k8s.io/code-generator => k8s.io/code-generator v0.16.8 + k8s.io/api => k8s.io/api v0.19.6 + k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.19.6 + k8s.io/apimachinery => k8s.io/apimachinery v0.19.6 + k8s.io/client-go => k8s.io/client-go v0.19.6 + k8s.io/code-generator => k8s.io/code-generator v0.19.6 k8s.io/helm => k8s.io/helm v2.13.1+incompatible - k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.16.8 - k8s.io/kube-openapi => 
k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf + k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.19.6 + k8s.io/kube-openapi => github.com/gardener/kube-openapi v0.0.0-20201221124747-75e88872edcf // k8s-1.19 ) diff --git a/go.sum b/go.sum index 64162a6c7..5fe26c9a2 100644 --- a/go.sum +++ b/go.sum @@ -8,6 +8,8 @@ cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTj cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0 h1:0E3eE8MX426vUOs7aHfI7aN1BrIzzzf4ccKCSfSjGmc= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.51.0 h1:PvKAVQWCtlGUSlZkGW3QLelKaWq7KYv/MW1EboG8bfM= +cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= @@ -15,12 +17,16 @@ cloud.google.com/go/storage v1.0.0 h1:VV2nUM3wwLLGh9lSABFgZMjInyUbJeaRSE64WuAIQ+ cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/Azure/azure-sdk-for-go v39.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v42.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0= +github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= +github.com/Azure/go-autorest/autorest v0.10.1/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= +github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM= github.com/Azure/go-autorest/autorest/azure/cli v0.3.1/go.mod h1:ZG5p860J94/0kI9mNJVoIoLgXcirM2gF5i2kWloofxw= github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= @@ -40,6 +46,7 @@ github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy86 github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= +github.com/Masterminds/sprig v2.16.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= github.com/Masterminds/sprig v2.22.0+incompatible h1:z4yfnGrZ7netVz+0EDJ0Wi+5VZCSYp4Z0m2dk6cEM60= github.com/Masterminds/sprig v2.22.0+incompatible/go.mod 
h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= @@ -49,7 +56,9 @@ github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbt github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= github.com/ahmetb/gen-crd-api-reference-docs v0.1.5/go.mod h1:P/XzJ+c2+khJKNKABcm2biRwk2QAuwbLf8DlXuaL7WM= +github.com/ahmetb/gen-crd-api-reference-docs v0.2.0/go.mod h1:P/XzJ+c2+khJKNKABcm2biRwk2QAuwbLf8DlXuaL7WM= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= @@ -58,6 +67,9 @@ github.com/alessio/shellescape v0.0.0-20190409004728-b115ca0f9053/go.mod h1:xW8s github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20180828111155-cad214d7d71f/go.mod h1:T9M45xf79ahXVelWoOBmH0y4aC1t5kXO5BxwyakgIGA= github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190603021944-12ad9f921c0b/go.mod h1:myCDvQSzCW+wB1WAlocEru4wMGJxy+vlxHdhegi1CDQ= github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190307165228-86c17b95fcd5/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8= +github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= +github.com/aokoli/goutils v1.0.1/go.mod h1:SijmP0QR8LtwsmDs8Yii5Z/S4trXFGFC2oO5g9DP+DQ= +github.com/appscode/jsonpatch v1.0.1/go.mod h1:4AJxUpXUhv4N+ziTvIcWWXgeorXpxPZOfk9HdEVr96M= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= @@ -69,14 +81,21 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline 
v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cloudflare/cloudflare-go v0.11.4/go.mod h1:ZB+hp7VycxPLpp0aiozQQezat46npDXhzHi1DVtRCn4= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/coreos/bbolt v1.3.1-coreos.6/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= @@ -91,14 +110,16 @@ github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7 github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/cyphar/filepath-securejoin v0.2.2 h1:jCwT2GTP+PY5nBz3c/YL5PAIbusElVrPujOBSCj8xRg= github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= -github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -107,39 +128,71 @@ github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8 github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96 h1:cenwrSVm+Z7QLSV/BsnenAOcDXdX4cMv4wP0B/5QbPg= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod 
h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dsnet/compress v0.0.1/go.mod h1:Aw8dCMJ7RioblQeTqt88akK31OvO8Dhf5JflhBbQEHo= github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY= -github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/elazarl/goproxy v0.0.0-20191011121108-aa519ddbe484 h1:pEtiCjIXx3RvGjlUJuCNxNOw0MNblyR9Wi+vJGBFh+8= github.com/elazarl/goproxy v0.0.0-20191011121108-aa519ddbe484/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM= +github.com/elazarl/goproxy/ext v0.0.0-20190711103511-473e67f1d7d2 h1:dWB6v3RcOy03t/bUadywsbyrQwCqZeNIEX6M1OtSZOM= github.com/elazarl/goproxy/ext v0.0.0-20190711103511-473e67f1d7d2/go.mod h1:gNh8nYJoAm43RfaxurUnxr+N1PwuFV3ZMl/efxlIlY8= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.6+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7-0.20200730005029-803dd64f0468/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanphx/json-patch v0.0.0-20200808040245-162e5629780b/go.mod h1:NAJj0yf/KaRKURN6nyi7A9IZydMivZEm9oQLWNjfKDc= +github.com/evanphx/json-patch v4.0.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M= github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses= +github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/frankban/quicktest v1.5.0/go.mod h1:jaStnuzAqU1AJdCO0l53JDCJrVDKcS03DbaAcR7Ks/o= github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/gardener/controller-manager-library v0.1.1-0.20191212112146-917449ad760c/go.mod 
h1:v6cbldxmpL2fYBEB2lSnq3LSEPwIHus9En6iIhwNE1k= github.com/gardener/controller-manager-library v0.1.1-0.20200204110458-c263b9bb97ad/go.mod h1:v6cbldxmpL2fYBEB2lSnq3LSEPwIHus9En6iIhwNE1k= +github.com/gardener/controller-manager-library v0.2.1-0.20200810091329-d980dbe10959/go.mod h1:XMp1tPcX3SP/dMd+3id418f5Cqu44vydeTkBRbW8EvQ= github.com/gardener/etcd-druid v0.1.12/go.mod h1:yZrUQY9clD8/ZXK+MmEq8OS1TaKJeipV0u4kHHrwWeY= +github.com/gardener/etcd-druid v0.1.15/go.mod h1:BHXG8N04Dl4On7Ie6cErwmpvzncNrmeb+HO7Sqrhf+A= +github.com/gardener/etcd-druid v0.3.0/go.mod h1:uxZjZ57gIgpX554vGp495g2i8DByoS3OkVtiqsxtbwk= github.com/gardener/external-dns-management v0.7.3/go.mod h1:Y3om11E865x4aQ7cmcHjknb8RMgCO153huRb/SvP+9o= github.com/gardener/external-dns-management v0.7.7/go.mod h1:egCe/FPOsUbXA4WV0ne3h7nAD/nLT09hNt/FQQXK+ec= +github.com/gardener/external-dns-management v0.7.18 h1:15uIyFfZSbR8fivnXvqb1Dvv4QqzfNYxEFUQ9K+mpsE= +github.com/gardener/external-dns-management v0.7.18/go.mod h1:oHhauLQ3/sop0c1urS6n304Wqv/WM4me0geLn9nTAcY= github.com/gardener/gardener v1.1.2/go.mod h1:CP9I0tCDVXTLPkJv/jUtXVUh948kSNKEGUg0haLz9gk= github.com/gardener/gardener v1.3.1 h1:5uCR5rNrhOBrB8urBi/ed1w+GhzxU2qXNDh4cYXfSjc= github.com/gardener/gardener v1.3.1/go.mod h1:936P5tQbg6ViiW8BVC9ELM95sFrk4DgobKrxMNtn/LU= +github.com/gardener/gardener v1.4.1-0.20200519155656-a8ccc6cc779a/go.mod h1:t9oESM37bAMIuezi9I0H0I8+++8jy8BUPitcf4ERRXY= +github.com/gardener/gardener v1.11.3/go.mod h1:5DzqfOm+G8UftKu5zUbYJ+9Cnfd4XrvRNDabkM9AIp4= +github.com/gardener/gardener v1.17.1 h1:yGYYa8b7j5gD1PzbjQafhSHW69/D8LHzrfls8Qc2mP0= +github.com/gardener/gardener v1.17.1/go.mod h1:uucRHq0xV46xd9MpJJjRswx/Slq3+ipbbJg09FVUtvM= github.com/gardener/gardener-resource-manager v0.10.0 h1:6OUKoWI3oha42F0oJN8OEo3UR+D3onOCel4Th+zgotU= github.com/gardener/gardener-resource-manager v0.10.0/go.mod h1:0pKTHOhvU91eQB0EYr/6Ymd7lXc/5Hi8P8tF/gpV0VQ= +github.com/gardener/gardener-resource-manager v0.13.1/go.mod h1:0No/XttYRUwDn5lSppq9EqlKdo/XJQ44aCZz5BVu3Vw= +github.com/gardener/gardener-resource-manager v0.18.0 h1:bNB0yKhSqe8DnsvIp3xZr9nsFB4fm+AUAqj1EoIvWU8= +github.com/gardener/gardener-resource-manager v0.18.0/go.mod h1:k53Yw2iDAIpTxnChQY9qFHrRtuPQWJDNnCP9eE6TnWQ= github.com/gardener/hvpa-controller v0.0.0-20191014062307-fad3bdf06a25 h1:nOFITmV7vt4fcYPEXgj66Qs83FdDEMvL/LQcR0diRRE= github.com/gardener/hvpa-controller v0.0.0-20191014062307-fad3bdf06a25/go.mod h1:yj7YJ6ijo4adcpXQKutPFZfQuKLdM5UMZZUlpbM3vig= +github.com/gardener/hvpa-controller v0.2.5/go.mod h1:rjsb3BPKJFMluudZ8/bhCCDQfFCF/0Um+rzXQI/MmfI= +github.com/gardener/hvpa-controller v0.3.1 h1:VsOdcKZMcZDlUNVbFY8oqlKrb1GSCdmzPooKT/Tyi+Y= +github.com/gardener/hvpa-controller v0.3.1/go.mod h1:rjsb3BPKJFMluudZ8/bhCCDQfFCF/0Um+rzXQI/MmfI= +github.com/gardener/kube-openapi v0.0.0-20201221124747-75e88872edcf h1:JYF72/j/sjvXrqnClgqHuICiIyqw3Ss/IPuOeoTIUjE= +github.com/gardener/kube-openapi v0.0.0-20201221124747-75e88872edcf/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= github.com/gardener/machine-controller-manager v0.27.0/go.mod h1:zlIxuLQMtRO+aXOFsG6qtYkBmggbWY82K7MSO051ARU= +github.com/gardener/machine-controller-manager v0.33.0/go.mod h1:jxxE+mGgXwg4iPlCHTG4GtUfK2CcHA6yYoIIowoxOZU= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680 h1:ZktWZesgun21uEDrwW7iEV1zPCGQldM2atlJZ3TdvVM= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= @@ -154,14 +207,20 @@ 
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logr/logr v0.1.0 h1:M1Tv3VzNlEHg6uyACnRdtrploV2P7wZqH8BoQMtz0cg= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v0.3.0 h1:q4c+kbcR0d5rSurhBR8dIgieOaYpXtsdTYfx22Cu6rs= +github.com/go-logr/logr v0.3.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/zapr v0.1.0 h1:h+WVe9j6HAA01niTJPA/kKH0i7e0rLZBCwauQFcRE54= github.com/go-logr/zapr v0.1.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= github.com/go-logr/zapr v0.1.1 h1:qXBXPDdNncunGs7XeEpsJt8wCjYBygluzfdLO0G5baE= github.com/go-logr/zapr v0.1.1/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= +github.com/go-logr/zapr v0.2.0 h1:v6Ji8yBW77pva6NkJKQdHLAJKrIJKRHz0RXwPqCHSR4= +github.com/go-logr/zapr v0.2.0/go.mod h1:qhKdvif7YF5GI9NWEpyxTSSBdGmzkNguibrdCNVPunU= github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= +github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU= github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= @@ -174,19 +233,24 @@ github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1 github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= +github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCsn0N0ZrQsk= github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= +github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= +github.com/go-openapi/spec v0.19.3/go.mod 
h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= +github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= @@ -195,19 +259,23 @@ github.com/go-openapi/swag v0.19.4/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= +github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= github.com/gobuffalo/flect v0.1.5/go.mod h1:W3K3X9ksuZfir8f/LrfVtWmCDQFfayuylOJ7sz/Fj80= +github.com/gobuffalo/flect v0.2.0/go.mod h1:W3K3X9ksuZfir8f/LrfVtWmCDQFfayuylOJ7sz/Fj80= github.com/gobuffalo/logger v1.0.0/go.mod h1:2zbswyIUa45I+c+FLXuWl9zSWEiVuthsk8ze5s8JvPs= github.com/gobuffalo/packd v0.3.0/go.mod h1:zC7QkmNkYVGKPw4tHpBQ+ml7W/3tIebgeo1b36chA3Q= github.com/gobuffalo/packr v1.30.1/go.mod h1:ljMyFO2EcrnzsHsN99cvbq055Y9OhRrIaviy289eRuk= github.com/gobuffalo/packr/v2 v2.5.1/go.mod h1:8f9c96ITobJlPzI44jj+4tHnEKNt0xXWSVlXRN9X1Iw= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d h1:3PaI8p3seN09VjbTYC/QWlUZdZ1qS1zGjy7LH2Wt07I= github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d/go.mod h1:nnjvkQ9ptGaCkuDUx6wNykzzlUixGxvkme+H/lnzb+A= @@ -225,12 +293,23 @@ github.com/golang/mock v1.3.1 h1:qGJ6qTW+x6xX/my+8YUVl4WNpX9B7+/l2tRsHGZ7f2s= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.4.3 h1:GV+pQPG/EUUbkh47niozDcADz6go/dUwhVzdUQHIVRw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/mock v1.4.4-0.20200731163441-8734ec565a4d h1:izNDUPqGqkeNSYeebPs9caowE15dhr4m59/68323beM= +github.com/golang/mock 
v1.4.4-0.20200731163441-8734ec565a4d/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= github.com/golang/protobuf v1.0.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= @@ -239,13 +318,18 @@ github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= +github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod 
h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= @@ -254,25 +338,31 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+ github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.3.1 h1:WeAefnSUHlBb0iJKwxFDZdbfGwkd7xRNuV+IpXMJhYk= github.com/googleapis/gnostic v0.3.1/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU= +github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= +github.com/googleapis/gnostic v0.5.1 h1:A8Yhf6EtqTv9RMsU6MQTyrtV1TjWlR6xU9BsZIwuTCM= +github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= github.com/gophercloud/gophercloud v0.0.0-20190212181753-892256c46858/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= -github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= github.com/gophercloud/gophercloud v0.2.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= github.com/gophercloud/gophercloud v0.6.1-0.20191122030953-d8ac278c1c9d/go.mod h1:ozGNgr9KYOVATV5jsgHl/ceCDXGuguqOZAzoQ/2vcNM= github.com/gophercloud/gophercloud v0.7.0/go.mod h1:gmC5oQqMDOMO1t1gq5DquX/yAU808e/4mzjjDA76+Ss= github.com/gophercloud/utils v0.0.0-20190527093828-25f1b77b8c03/go.mod h1:SZ9FTKibIotDtCrxAU/evccoyu1yhKST6hgBvwTB5Eg= github.com/gophercloud/utils v0.0.0-20200204043447-9864b6f1f12f/go.mod h1:ehWUbLQJPqS0Ep+CxeD559hsm9pthPXadJNKwZkp43w= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/gregjones/httpcache v0.0.0-20181110185634-c63ab54fda8f/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v0.0.0-20190222133341-cfaf5686ec79/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.3.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.11.3/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod 
h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -285,26 +375,34 @@ github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+ github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.3 h1:YPkqC67at8FYaadspW/6uE0COsBxS2656RLEr8Bppgk= github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4= github.com/huandu/xstrings v1.3.0 h1:gvV6jG9dTgFEncxo+AF7PH6MZXi/vZl25owA/8Dg8Wo= github.com/huandu/xstrings v1.3.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/huandu/xstrings v1.3.1 h1:4jgBlKK6tLKFvO8u5pmYjG91cqytmDCDvGh7ECVFfFs= github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.8 h1:CGgOkSJeqMRmt0D9XLWExdT4m4F1vd3FV3VPt+0VxkQ= github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.10 h1:6q5mVkdH/vYmqngx7kZQTjJ5HRsx+ImorDIEQ+beJgc= +github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/infobloxopen/infoblox-go-client v1.1.0/go.mod h1:BXiw7S2b9qJoM8MS40vfgCNB2NLHGusk1DtO16BD9zI= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo= @@ -312,7 +410,10 @@ github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/u github.com/json-iterator/go v1.1.8/go.mod 
h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.9 h1:9yzud/Ht36ygwatGx56VwCZtlI/2AD15T1X2sjSuGns= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/karrick/godirwalk v1.10.12/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= @@ -325,9 +426,13 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGi github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= @@ -339,13 +444,19 @@ github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/matttproud/golang_protobuf_extensions 
v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/mholt/archiver v3.1.1+incompatible/go.mod h1:Dh2dOXnSdiLxRiPoVfIr/fI1TwETms9B8CTWfeh7ROU= github.com/miekg/dns v1.1.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= @@ -356,18 +467,22 @@ github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/I github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mitchellh/reflectwalk v1.0.1 h1:FVzMWA5RllMAKIdUSC8mdWo3XtwoecrH79BY70sEEpE= github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/nwaples/rardecode v1.0.0/go.mod h1:5DzqNKiOdpKKBH87u8VlvAnPZMXcGRhxWkRpHbbfGS0= +github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.4.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -375,12 +490,23 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo= github.com/onsi/ginkgo v1.10.1/go.mod 
h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.14.1 h1:jMU0WaQrP0a/YAEq8eJmJKjBoMs+pClEr1vDMlM/Do4= +github.com/onsi/ginkgo v1.14.1/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.3.0/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.10.5 h1:7n6FEkpFmfCoo2t+YYqXH0evK+a9ICQz0xcAy9dYcaQ= +github.com/onsi/gomega v1.10.5/go.mod h1:gza4q3jKQJijlu05nKWRCW/GavJumGt8aNRxWg7mt48= github.com/packethost/packngo v0.0.0-20181217122008-b3b45f1b4979/go.mod h1:otzZQXgoO96RTzDB/Hycg0qZcXZsWJGJRSXbmEIJ+4M= github.com/pborman/uuid v0.0.0-20170612153648-e790cca94e6c/go.mod h1:VyrYX9gd7irzKovcSS6BIIEwPRkP2Wm2m9ufcdFSJ34= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= @@ -394,7 +520,6 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/errors v0.9.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= @@ -407,12 +532,16 @@ github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5Fsn github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= github.com/prometheus/client_golang v1.3.0 h1:miYCvYqFXtl/J9FIy8eNpBfYthAEFg+Ys0XyUVEcDsc= github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= +github.com/prometheus/client_golang v1.7.1 h1:NTGy1Ja9pByO+xAeH/qiWnLrKtr3hJPNjaVUwnjpdpA= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= 
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.1.0 h1:ElTg5tNp4DqfV7UQjDqv2+RJlNzsDtvNAWccbItceIE= github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181126121408-4724e9255275 h1:PnBWHBf+6L0jOqq0gIVUe6Yk0/QMZ640k6NvkxcBf+8= @@ -422,6 +551,8 @@ github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8 github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= github.com/prometheus/common v0.7.0 h1:L+1lyG48J1zAQXA3RBX/nG/B3gjlHq0zTt2tlbJLyCY= github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= +github.com/prometheus/common v0.10.0 h1:RyRA7RzGXQZiW+tGMr7sxa85G1z0yOpM1qq5c8lNawc= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a h1:9a8MnZMP0X2nLJdBg+pBmGgkJlSaKC2KaQmTCk1XDtE= @@ -431,8 +562,10 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.1.3 h1:F0+tqvhOksq22sc6iCHF5WGlWjdwj92p0udFh1VFBS8= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M= github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/go-charset v0.0.0-20180617210344-2471d30d28b4/go.mod h1:qgYeAmZ5ZIpBWTGllZSQnw97Dj+woV0toclVaRGI8pc= @@ -441,10 +574,13 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod 
h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= @@ -454,10 +590,13 @@ github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasO github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/cobra v0.0.6/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= +github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= @@ -465,10 +604,10 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/spf13/viper v1.6.1/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k= +github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= -github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= @@ -476,13 +615,17 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/texttheater/golang-levenshtein v0.0.0-20191208221605-eb6844b05fc6/go.mod 
h1:XDKHRm5ThF8YJjx001LtgelzsoaEcvnA7lVWz9EeX3g= +github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/ulikunitz/xz v0.5.6/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.3/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli/v2 v2.1.1/go.mod h1:SE9GqnLQmjVa0iPEY0f1w3ygNIYcIJ0OKPMoW2caLfQ= +github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos= github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= @@ -490,9 +633,16 @@ github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1: github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= +go.etcd.io/etcd v0.5.0-alpha.5.0.20200819165624-17cef6e3e9d5/go.mod h1:skWido08r9w6Lq/w70DO5XYIKMu4QFu1+4VsqLQuJy8= +go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.uber.org/atomic v0.0.0-20181018215023-8dc6146f7569/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= @@ -500,20 +650,29 @@ go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.5.1 h1:rsqfU5vBkVknbhUGbAUwQKR2H4ItV8tjJ+6kJX4cxHM= go.uber.org/atomic v1.5.1/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0= +go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/multierr 
v0.0.0-20180122172545-ddea229ff1df/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.4.0 h1:f3WCSC2KzAcBXGATIxAB1E2XuCpNU255wNKZ505qi3E= go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v0.0.0-20180814183419-67bc79d13d15/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.8.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.9.1 h1:XCJQEf3W6eZaVwhRBof6ImoYGJSITeKWsyeh3HFu/5o= go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0 h1:nR6NoDBgAf67s68NhaXbsojM+2gxp3S1hWkHDl27pVU= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.15.0 h1:ZZCA22JRF2gQE5FoNmhmrf7jeJJ2uhqDUNRYKm8dvmM= +go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= golang.org/x/crypto v0.0.0-20180820150726-614d502a4dac/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -523,6 +682,7 @@ golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190621222207-cc06ce4a13d4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -531,13 +691,14 @@ golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20191227163750-53104e6ec876/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975 h1:/Tl7pH94bvbAAHBdZJT947M/+gp0+CqQXDtMRC0fseo= golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp 
v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -549,6 +710,8 @@ golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f h1:J5lckAjkw6qYlOZNj90mLYNTEKDvWeuc1yieZ8qUzUE= golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= @@ -556,7 +719,8 @@ golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180112015858-5ccada7d0a7b/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -578,6 +742,8 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 h1:rjwSpXsdiK0dV8/Naq3kAw9ymfAeJIyd0upUIElB+lI= golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191126235420-ef20fe5d7933/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -586,6 +752,11 @@ golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a h1:GuSPYbZzB5/dcLNCwLQLsg3obCJtX9IJhpXkvY7kzk0= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb h1:eBmm0M9fYhWpKZLjQUUKka/LtIxf46G4fxeEz5KJr9U= +golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= @@ -598,7 +769,6 @@ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180117170059-2c42eef0765b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -622,36 +792,51 @@ golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a h1:aYOabOQFp6Vj6W1F80affTUvO9UxmJRx8K0gsfABByQ= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191128015809-6d18c012aee9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8 h1:JA8d3MPx/IToSyXZG/RhwYEtfrKO1Fxrqe8KrkiLXKM= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200317113312-5766fd39f98d h1:62ap6LNOjDU6uGmKXHJbSfciMoV+FeI1sRXx/pLDL44= golang.org/x/sys v0.0.0-20200317113312-5766fd39f98d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201112073958-5cba982894dd h1:5CtCZbICpIOFdgO940moixOPjc0178IU44m4EjOO5IY= +golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20171227012246-e19ae1496984/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e 
h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190213192042-740235f6c0d8/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -659,12 +844,15 @@ golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3 golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190501045030-23463209683d/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190624180213-70d37148ca0c/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -672,15 +860,22 @@ golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools 
v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191127201027-ecd32218bd7f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191203134012-c197fd4bf371/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191230220329-2aa90c603ae3 h1:2+KluhQfJ1YhW+TB1KrISS2SfiG1pLEoseB0D4VF/bo= golang.org/x/tools v0.0.0-20191230220329-2aa90c603ae3/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200408032209-46bd65c8538f h1:rsqLea9h7rgdpXI+GnnbQkptKSzOybXLjn3gqcL2XZ0= golang.org/x/tools v0.0.0-20200408032209-46bd65c8538f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200422205258-72e4a01eba43/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200616195046-dc31b401abb5 h1:UaoXseXAWUJUcuJ2E2oczJdLxAJXL0lOmVaBl7kuk+I= +golang.org/x/tools v0.0.0-20200616195046-dc31b401abb5/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -689,15 +884,15 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T gomodules.xyz/jsonpatch/v2 v2.0.0/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU= gomodules.xyz/jsonpatch/v2 v2.0.1 h1:xyiBuvkD2g5n7cYzx6u2sxQvsAy4QJsZFCzGVdzOXZ0= gomodules.xyz/jsonpatch/v2 v2.0.1/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU= -gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0= -gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= -gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ= +gomodules.xyz/jsonpatch/v2 v2.1.0 h1:Phva6wqu+xR//Njw6iorylFFgn/z547tw5Ne3HZPQ+k= +gomodules.xyz/jsonpatch/v2 v2.1.0/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.14.0 h1:uMf5uLi4eQMRrMKhCplNik4U4H8Z6C1br3zOtAa/aDE= google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod 
h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c= @@ -705,6 +900,8 @@ google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -715,18 +912,35 @@ google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98 google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1 h1:aQktFqmDE2yjveXJlVIfslDFmFnUXSqG0i6KRcJAeMc= google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0 h1:AzbTB6ux+okLTzP8Ru1Xs41C303zdcfEht7MQnYJt5A= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.1/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0 
h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= @@ -744,76 +958,118 @@ gopkg.in/yaml.v2 v2.0.0/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20190905181640-827449938966/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20191120175047-4206685974f2/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclpTYkz2zFM+lzLJFO4gQ= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -k8s.io/api v0.16.8 h1:T72itM0CUT8KHqPAqbjTeSY0n24RyVM71nLiMlq/cAw= -k8s.io/api v0.16.8/go.mod h1:a8EOdYHO8en+YHhPBLiW5q+3RfHTr7wxTqqp7emJ7PM= -k8s.io/apiextensions-apiserver v0.16.8 h1:Dei7K1m2c539CYCfpvVPLdRin5CF2C2rbZ0lzuprUFs= -k8s.io/apiextensions-apiserver v0.16.8/go.mod h1:R989okvPLerXtLjflp2PROO8mNH+rdW9WAxlD/aQcmw= -k8s.io/apimachinery v0.16.8 h1:wgFRqtel3w3rcclpba+iBkVlKeBlh42OzNp7FalXVCg= -k8s.io/apimachinery v0.16.8/go.mod h1:Xk2vD2TRRpuWYLQNM6lT9R7DSFZUYG03SarNkbGrnKE= +istio.io/api v0.0.0-20201123152548-197f11e4ea09 
h1:+3Q/a15sTRDAYN66nM+bqCDF1MsynIEpWm9CKPdPOPg= +istio.io/api v0.0.0-20201123152548-197f11e4ea09/go.mod h1:88HN3o1fSD1jo+Z1WTLlJfMm9biopur6Ct9BFKjiB64= +istio.io/client-go v1.8.1 h1:gIeAQBgXkyUuvQ5LUtvxIY5TdFuaCLZqXgxF3bigKww= +istio.io/client-go v1.8.1/go.mod h1:Qymv71lwIqjDTkaE2NqBYLL+Bl5KsCfzEDhntXypHYY= +istio.io/gogo-genproto v0.0.0-20190930162913-45029607206a h1:w7zILua2dnYo9CxImhpNW4NE/8ZxEoc/wfBfHrhUhrE= +istio.io/gogo-genproto v0.0.0-20190930162913-45029607206a/go.mod h1:OzpAts7jljZceG4Vqi5/zXy/pOg1b209T3jb7Nv5wIs= +k8s.io/api v0.19.6 h1:F3lfwgpKcKms6F1mMqkQXFzXmme8QqHTJBtBkev3TOg= +k8s.io/api v0.19.6/go.mod h1:Plxx44Nh4zVblkJrIgxVPgPre1mvng6tXf1Sj3bs0fU= +k8s.io/apiextensions-apiserver v0.19.6 h1:LL7H65E2VTBfxmsWQZth60zzWVtbSN2gWMEWfsuDvIQ= +k8s.io/apiextensions-apiserver v0.19.6/go.mod h1:9s8ceL67UJAD1ewbsn07tkQ7/EGjiKOedKyiUCVXJgQ= +k8s.io/apimachinery v0.19.6 h1:kBLzSGuDdY1NdSV2uFzI+FwZ9wtkmG+X3ZVcWXSqNgA= +k8s.io/apimachinery v0.19.6/go.mod h1:6sRbGRAVY5DOCuZwB5XkqguBqpqLU6q/kOaOdk29z6Q= k8s.io/apiserver v0.0.0-20190918160949-bfa5e2e684ad/go.mod h1:XPCXEwhjaFN29a8NldXA901ElnKeKLrLtREO9ZhFyhg= k8s.io/apiserver v0.0.0-20191010014313-3893be10d307/go.mod h1:6bqaTSOSJavUIXUtfaR9Os9JtTCm8ZqH2SUl2S60C4w= k8s.io/apiserver v0.16.8/go.mod h1:FzppK0uWsEZnS/FUCOTLQuMRSItRGYstLxTx4nU1Wc4= +k8s.io/apiserver v0.18.8/go.mod h1:12u5FuGql8Cc497ORNj79rhPdiXQC4bf53X/skR/1YM= +k8s.io/apiserver v0.19.6/go.mod h1:05XquZxCDzQ27ebk7uV2LrFIK4lm5Yt47XkkUvLAoAM= +k8s.io/autoscaler v0.0.0-20190805135949-100e91ba756e h1:5AX59ZgftHpbmNupSWosdtW4q/rCnF4s/0J0dEfJkAQ= k8s.io/autoscaler v0.0.0-20190805135949-100e91ba756e/go.mod h1:QEXezc9uKPT91dwqhSJq3GNI3B1HxFRQHiku9kmrsSA= -k8s.io/client-go v0.16.8 h1:CmsQXJpSWq1aUyQ5Lp/rRPiMK2OYfJv32Ftl0D1D42U= -k8s.io/client-go v0.16.8/go.mod h1:WmPuN0yJTKHXoklExKxzo3jSXmr3EnN+65uaTb5VuNs= +k8s.io/client-go v0.19.6 h1:vtPb33nP8DBMW+/CyuJ8fiie36c3CM1Ts6L4Tsr+PtU= +k8s.io/client-go v0.19.6/go.mod h1:gEiS+efRlXYUEQ9Oz4lmNXlxAl5JZ8y2zbTDGhvXXnk= k8s.io/cluster-bootstrap v0.0.0-20190918163108-da9fdfce26bb/go.mod h1:mQVbtFRxlw/BzBqBaQwIMzjDTST1KrGtzWaR4CGlsTU= k8s.io/cluster-bootstrap v0.16.8/go.mod h1:fT1U/qWmXNmIColCsCBg4G881nWFaEqONL0xmP48rkI= -k8s.io/code-generator v0.16.8/go.mod h1:wFdrXdVi/UC+xIfLi+4l9elsTT/uEF61IfcN2wOLULQ= +k8s.io/cluster-bootstrap v0.18.8/go.mod h1:guq0Uc+QwazHgpS1yAw5Z7yUlBCtGppbgWQkbN3lxIY= +k8s.io/cluster-bootstrap v0.19.6 h1:ZtOoKjWEZP6BEuDd55B3sHTjneutv0z1oh3UfWiKxpc= +k8s.io/cluster-bootstrap v0.19.6/go.mod h1:9Ft1ED2O3k+4+gtkkth/Y0qHCdi9y+IMI8wh4HszXi4= +k8s.io/code-generator v0.19.6/go.mod h1:lwEq3YnLYb/7uVXLorOJfxg+cUu2oihFhHZ0n9NIla0= k8s.io/component-base v0.0.0-20190918160511-547f6c5d7090/go.mod h1:933PBGtQFJky3TEwYx4aEPZ4IxqhWh3R6DCmzqIn1hA= k8s.io/component-base v0.16.8/go.mod h1:Q8UWOWShpP3MZZny4n/15gOncfaaVtc9SbCdkM5MhUE= +k8s.io/component-base v0.18.8/go.mod h1:00frPRDas29rx58pPCxNkhUfPbwajlyyvu8ruNgSErU= +k8s.io/component-base v0.19.2/go.mod h1:g5LrsiTiabMLZ40AR6Hl45f088DevyGY+cCE2agEIVo= +k8s.io/component-base v0.19.6 h1:V76d3rIEWvP95peWgRycKslQnEwlaPy4UORvh3+YBbU= +k8s.io/component-base v0.19.6/go.mod h1:8Btsf8J00/fVDa/YFmXjei7gVkcFrlKZXjSeP4SZNJg= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20190826232639-a874a240740c/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod 
h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/helm v2.13.1+incompatible h1:qt0LBsHQ7uxCtS3F2r3XI0DNm8ml0xQeSJixUorDyn0= k8s.io/helm v2.13.1+incompatible/go.mod h1:LZzlS4LQBHfciFOurYBFkCMTaZ0D1l+p0teMg7TSULI= -k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.2.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.4.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= -k8s.io/kube-aggregator v0.16.8/go.mod h1:l73g+bVdjrgDz9nrISk6AgupGbv1n+4WjTbGaXz/YvI= -k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf h1:EYm5AW/UUDbnmnI+gK0TJDVK9qPLhM+sRHYanNKw0EQ= -k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= +k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.2.0 h1:XRvcwJozkgZ1UQJmfMGpvRthQHOvihEhYtDfAaxMz/A= +k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/kube-aggregator v0.19.6 h1:huAkb9MZVN56gQ5fXe0eckFF6pbt167tPU6wkIMpiV8= +k8s.io/kube-aggregator v0.19.6/go.mod h1:BeD33Jp5LLaDH4t9oh1B+LkOY9D5+xhAC8I3ZSvI6m0= k8s.io/kubelet v0.16.8/go.mod h1:mzDpnryQg2dlB6V3/WAgb1baIamiICtWpXMFrPOFh6I= +k8s.io/kubelet v0.18.8/go.mod h1:6z1jHCk0NPE6WshFStfqcgQ1bnD3tetcPmhC2915aio= +k8s.io/kubelet v0.19.6/go.mod h1:/yashsvRBHMGFnxpmTjtaI0sJ4rLJno9zXzc6PPU8Ls= k8s.io/metrics v0.0.0-20191004105854-2e8cf7d0888c/go.mod h1:a25VAbm3QT3xiVl1jtoF1ueAKQM149UdZ+L93ePfV3M= k8s.io/metrics v0.16.8/go.mod h1:uBIJKJKdga8vL76a1dl+eRlUqOAdCbBpvFHC28SbUIY= +k8s.io/metrics v0.18.8/go.mod h1:j7JzZdiyhLP2BsJm/Fzjs+j5Lb1Y7TySjhPWqBPwRXA= +k8s.io/metrics v0.19.6/go.mod h1:jM61saf/bjMRmow6zan2cAk8vFDmqvbNXFRbB4g7TNs= k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20190801114015-581e00157fb1/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20191218082557-f07c713de883 h1:TA8t8OLS8m3/0dtTckekO0pCQ7qMnD19fsZTQEgCSKQ= k8s.io/utils v0.0.0-20191218082557-f07c713de883/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20200327001022-6496210b90e8 h1:6JFbaLjRyBz8K2Jvt+pcT+N3vvwMZfg8MfVENwe9aag= k8s.io/utils v0.0.0-20200327001022-6496210b90e8/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= -modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= -modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= -modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= -modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= -modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I= +k8s.io/utils v0.0.0-20200603063816-c1c6865ac451/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20200619165400-6e3d28b6ed19/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20200912215256-4140de9c8800 
h1:9ZNvfPvVIEsp/T1ez4GQuzCcCTEQWhovSofhqR73A6g= +k8s.io/utils v0.0.0-20200912215256-4140de9c8800/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7/go.mod h1:PHgbrJT7lCHcxMU+mDHEm+nx46H4zuuHZkDP6icnhu0= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.9/go.mod h1:dzAXnQbTRyDlZPJX2SUPEqvnB+j7AJjtlox7PEwigU0= sigs.k8s.io/controller-runtime v0.2.0-beta.5/go.mod h1:HweyYKQ8fBuzdu2bdaeBJvsFgAi/OqBBnrVGXcqKhME= sigs.k8s.io/controller-runtime v0.4.0 h1:wATM6/m+3w8lj8FXNaO6Fs/rq/vqoOjO1Q116Z9NPsg= sigs.k8s.io/controller-runtime v0.4.0/go.mod h1:ApC79lpY3PHW9xj/w9pj+lYkLgwAAUZwfXkME1Lajns= +sigs.k8s.io/controller-runtime v0.6.3/go.mod h1:WlZNXcM0++oyaQt4B7C2lEE5JYRs8vJUzRP4N4JpdAY= +sigs.k8s.io/controller-runtime v0.7.1 h1:nqVwzVzdenfd9xIbB35pC7JJH2IXVL4hDo3MNzkyCh4= +sigs.k8s.io/controller-runtime v0.7.1/go.mod h1:pJ3YBrJiAqMAZKi6UVGuE98ZrroV1p+pIhoHsMm9wdU= +sigs.k8s.io/controller-tools v0.2.0-beta.4/go.mod h1:8t/X+FVWvk6TaBcsa+UKUBbn7GMtvyBKX30SGl4em6Y= sigs.k8s.io/controller-tools v0.2.4/go.mod h1:m/ztfQNocGYBgTTCmFdnK94uVvgxeZeE3LtJvd/jIzA= +sigs.k8s.io/controller-tools v0.2.9/go.mod h1:ArP7w60JQKkZf7UU2oWTVnEhoNGA+sOMyuSuS+JFNDQ= +sigs.k8s.io/controller-tools v0.4.1/go.mod h1:G9rHdZMVlBDocIxGkK3jHLWqcTMNvveypYJwrvYKjWU= sigs.k8s.io/kind v0.7.0/go.mod h1:An/AbWHT6pA/Lm0Og8j3ukGhfJP3RiVN/IBU6Lo3zl8= -sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= sigs.k8s.io/structured-merge-diff v0.0.0-20190817042607-6149e4549fca/go.mod h1:IIgPezJWb76P0hotTxzDbWsMYB8APh18qZnxkomBpxA= +sigs.k8s.io/structured-merge-diff v1.0.2 h1:WiMoyniAVAYm03w+ImfF9IE2G23GLR/SwDnQyaNZvPk= sigs.k8s.io/structured-merge-diff v1.0.2/go.mod h1:IIgPezJWb76P0hotTxzDbWsMYB8APh18qZnxkomBpxA= +sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= +sigs.k8s.io/structured-merge-diff/v4 v4.0.1 h1:YXTMot5Qz/X1iBRJhAt+vI+HVttY0WkSqqhKxQ0xVbA= +sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/testing_frameworks v0.1.1/go.mod h1:VVBKrHmJ6Ekkfz284YKhQePcdycOzNH9qL6ht1zEr/U= sigs.k8s.io/testing_frameworks v0.1.2 h1:vK0+tvjF0BZ/RYFeZ1E6BYBwHJJXhjuZ3TdsEKH+UQM= sigs.k8s.io/testing_frameworks v0.1.2/go.mod h1:ToQrwSC3s8Xf/lADdZp3Mktcql9CG0UAmdJG9th5i0w= sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/main.go b/main.go index a9035b99d..b56d2808a 100644 --- a/main.go +++ b/main.go @@ -58,7 +58,7 @@ func main() { flag.Parse() - ctrl.SetLogger(zap.Logger(true)) + ctrl.SetLogger(zap.New(zap.UseDevMode(true))) mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ Scheme: scheme, diff --git a/pkg/client/kubernetes/apply.go b/pkg/client/kubernetes/apply.go index 62d4da074..a6b27f5c9 100644 --- a/pkg/client/kubernetes/apply.go +++ b/pkg/client/kubernetes/apply.go @@ -76,10 +76,7 @@ func (c *Applier) applyObject(ctx context.Context, desired *unstructured.Unstruc 
desired.SetNamespace(metav1.NamespaceDefault) } - key, err := client.ObjectKeyFromObject(desired) - if err != nil { - return err - } + key := client.ObjectKeyFromObject(desired) if len(key.Name) == 0 { return fmt.Errorf("Missing 'metadata.name' in: %+v", desired) @@ -87,7 +84,7 @@ func (c *Applier) applyObject(ctx context.Context, desired *unstructured.Unstruc current := &unstructured.Unstructured{} current.SetGroupVersionKind(desired.GroupVersionKind()) - err = c.client.Get(ctx, key, current) + err := c.client.Get(ctx, key, current) if meta.IsNoMatchError(err) { c.discovery.Invalidate() err = c.client.Get(ctx, key, current) diff --git a/vendor/github.com/docker/spdystream/CONTRIBUTING.md b/vendor/github.com/docker/spdystream/CONTRIBUTING.md new file mode 100644 index 000000000..d4eddcc53 --- /dev/null +++ b/vendor/github.com/docker/spdystream/CONTRIBUTING.md @@ -0,0 +1,13 @@ +# Contributing to SpdyStream + +Want to hack on spdystream? Awesome! Here are instructions to get you +started. + +SpdyStream is a part of the [Docker](https://docker.io) project, and follows +the same rules and principles. If you're already familiar with the way +Docker does things, you'll feel right at home. + +Otherwise, go read +[Docker's contributions guidelines](https://github.com/dotcloud/docker/blob/master/CONTRIBUTING.md). + +Happy hacking! diff --git a/vendor/github.com/docker/spdystream/LICENSE b/vendor/github.com/docker/spdystream/LICENSE new file mode 100644 index 000000000..9e4bd4dbe --- /dev/null +++ b/vendor/github.com/docker/spdystream/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2014-2015 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/docker/spdystream/LICENSE.docs b/vendor/github.com/docker/spdystream/LICENSE.docs new file mode 100644 index 000000000..e26cd4fc8 --- /dev/null +++ b/vendor/github.com/docker/spdystream/LICENSE.docs @@ -0,0 +1,425 @@ +Attribution-ShareAlike 4.0 International + +======================================================================= + +Creative Commons Corporation ("Creative Commons") is not a law firm and +does not provide legal services or legal advice. Distribution of +Creative Commons public licenses does not create a lawyer-client or +other relationship. Creative Commons makes its licenses and related +information available on an "as-is" basis. Creative Commons gives no +warranties regarding its licenses, any material licensed under their +terms and conditions, or any related information. Creative Commons +disclaims all liability for damages resulting from their use to the +fullest extent possible. + +Using Creative Commons Public Licenses + +Creative Commons public licenses provide a standard set of terms and +conditions that creators and other rights holders may use to share +original works of authorship and other material subject to copyright +and certain other rights specified in the public license below. The +following considerations are for informational purposes only, are not +exhaustive, and do not form part of our licenses. 
+ + Considerations for licensors: Our public licenses are + intended for use by those authorized to give the public + permission to use material in ways otherwise restricted by + copyright and certain other rights. Our licenses are + irrevocable. Licensors should read and understand the terms + and conditions of the license they choose before applying it. + Licensors should also secure all rights necessary before + applying our licenses so that the public can reuse the + material as expected. Licensors should clearly mark any + material not subject to the license. This includes other CC- + licensed material, or material used under an exception or + limitation to copyright. More considerations for licensors: + wiki.creativecommons.org/Considerations_for_licensors + + Considerations for the public: By using one of our public + licenses, a licensor grants the public permission to use the + licensed material under specified terms and conditions. If + the licensor's permission is not necessary for any reason--for + example, because of any applicable exception or limitation to + copyright--then that use is not regulated by the license. Our + licenses grant only permissions under copyright and certain + other rights that a licensor has authority to grant. Use of + the licensed material may still be restricted for other + reasons, including because others have copyright or other + rights in the material. A licensor may make special requests, + such as asking that all changes be marked or described. + Although not required by our licenses, you are encouraged to + respect those requests where reasonable. More_considerations + for the public: + wiki.creativecommons.org/Considerations_for_licensees + +======================================================================= + +Creative Commons Attribution-ShareAlike 4.0 International Public +License + +By exercising the Licensed Rights (defined below), You accept and agree +to be bound by the terms and conditions of this Creative Commons +Attribution-ShareAlike 4.0 International Public License ("Public +License"). To the extent this Public License may be interpreted as a +contract, You are granted the Licensed Rights in consideration of Your +acceptance of these terms and conditions, and the Licensor grants You +such rights in consideration of benefits the Licensor receives from +making the Licensed Material available under these terms and +conditions. + + +Section 1 -- Definitions. + + a. Adapted Material means material subject to Copyright and Similar + Rights that is derived from or based upon the Licensed Material + and in which the Licensed Material is translated, altered, + arranged, transformed, or otherwise modified in a manner requiring + permission under the Copyright and Similar Rights held by the + Licensor. For purposes of this Public License, where the Licensed + Material is a musical work, performance, or sound recording, + Adapted Material is always produced where the Licensed Material is + synched in timed relation with a moving image. + + b. Adapter's License means the license You apply to Your Copyright + and Similar Rights in Your contributions to Adapted Material in + accordance with the terms and conditions of this Public License. + + c. BY-SA Compatible License means a license listed at + creativecommons.org/compatiblelicenses, approved by Creative + Commons as essentially the equivalent of this Public License. + + d. 
Copyright and Similar Rights means copyright and/or similar rights + closely related to copyright including, without limitation, + performance, broadcast, sound recording, and Sui Generis Database + Rights, without regard to how the rights are labeled or + categorized. For purposes of this Public License, the rights + specified in Section 2(b)(1)-(2) are not Copyright and Similar + Rights. + + e. Effective Technological Measures means those measures that, in the + absence of proper authority, may not be circumvented under laws + fulfilling obligations under Article 11 of the WIPO Copyright + Treaty adopted on December 20, 1996, and/or similar international + agreements. + + f. Exceptions and Limitations means fair use, fair dealing, and/or + any other exception or limitation to Copyright and Similar Rights + that applies to Your use of the Licensed Material. + + g. License Elements means the license attributes listed in the name + of a Creative Commons Public License. The License Elements of this + Public License are Attribution and ShareAlike. + + h. Licensed Material means the artistic or literary work, database, + or other material to which the Licensor applied this Public + License. + + i. Licensed Rights means the rights granted to You subject to the + terms and conditions of this Public License, which are limited to + all Copyright and Similar Rights that apply to Your use of the + Licensed Material and that the Licensor has authority to license. + + j. Licensor means the individual(s) or entity(ies) granting rights + under this Public License. + + k. Share means to provide material to the public by any means or + process that requires permission under the Licensed Rights, such + as reproduction, public display, public performance, distribution, + dissemination, communication, or importation, and to make material + available to the public including in ways that members of the + public may access the material from a place and at a time + individually chosen by them. + + l. Sui Generis Database Rights means rights other than copyright + resulting from Directive 96/9/EC of the European Parliament and of + the Council of 11 March 1996 on the legal protection of databases, + as amended and/or succeeded, as well as other essentially + equivalent rights anywhere in the world. + + m. You means the individual or entity exercising the Licensed Rights + under this Public License. Your has a corresponding meaning. + + +Section 2 -- Scope. + + a. License grant. + + 1. Subject to the terms and conditions of this Public License, + the Licensor hereby grants You a worldwide, royalty-free, + non-sublicensable, non-exclusive, irrevocable license to + exercise the Licensed Rights in the Licensed Material to: + + a. reproduce and Share the Licensed Material, in whole or + in part; and + + b. produce, reproduce, and Share Adapted Material. + + 2. Exceptions and Limitations. For the avoidance of doubt, where + Exceptions and Limitations apply to Your use, this Public + License does not apply, and You do not need to comply with + its terms and conditions. + + 3. Term. The term of this Public License is specified in Section + 6(a). + + 4. Media and formats; technical modifications allowed. The + Licensor authorizes You to exercise the Licensed Rights in + all media and formats whether now known or hereafter created, + and to make technical modifications necessary to do so. 
The + Licensor waives and/or agrees not to assert any right or + authority to forbid You from making technical modifications + necessary to exercise the Licensed Rights, including + technical modifications necessary to circumvent Effective + Technological Measures. For purposes of this Public License, + simply making modifications authorized by this Section 2(a) + (4) never produces Adapted Material. + + 5. Downstream recipients. + + a. Offer from the Licensor -- Licensed Material. Every + recipient of the Licensed Material automatically + receives an offer from the Licensor to exercise the + Licensed Rights under the terms and conditions of this + Public License. + + b. Additional offer from the Licensor -- Adapted Material. + Every recipient of Adapted Material from You + automatically receives an offer from the Licensor to + exercise the Licensed Rights in the Adapted Material + under the conditions of the Adapter's License You apply. + + c. No downstream restrictions. You may not offer or impose + any additional or different terms or conditions on, or + apply any Effective Technological Measures to, the + Licensed Material if doing so restricts exercise of the + Licensed Rights by any recipient of the Licensed + Material. + + 6. No endorsement. Nothing in this Public License constitutes or + may be construed as permission to assert or imply that You + are, or that Your use of the Licensed Material is, connected + with, or sponsored, endorsed, or granted official status by, + the Licensor or others designated to receive attribution as + provided in Section 3(a)(1)(A)(i). + + b. Other rights. + + 1. Moral rights, such as the right of integrity, are not + licensed under this Public License, nor are publicity, + privacy, and/or other similar personality rights; however, to + the extent possible, the Licensor waives and/or agrees not to + assert any such rights held by the Licensor to the limited + extent necessary to allow You to exercise the Licensed + Rights, but not otherwise. + + 2. Patent and trademark rights are not licensed under this + Public License. + + 3. To the extent possible, the Licensor waives any right to + collect royalties from You for the exercise of the Licensed + Rights, whether directly or through a collecting society + under any voluntary or waivable statutory or compulsory + licensing scheme. In all other cases the Licensor expressly + reserves any right to collect such royalties. + + +Section 3 -- License Conditions. + +Your exercise of the Licensed Rights is expressly made subject to the +following conditions. + + a. Attribution. + + 1. If You Share the Licensed Material (including in modified + form), You must: + + a. retain the following if it is supplied by the Licensor + with the Licensed Material: + + i. identification of the creator(s) of the Licensed + Material and any others designated to receive + attribution, in any reasonable manner requested by + the Licensor (including by pseudonym if + designated); + + ii. a copyright notice; + + iii. a notice that refers to this Public License; + + iv. a notice that refers to the disclaimer of + warranties; + + v. a URI or hyperlink to the Licensed Material to the + extent reasonably practicable; + + b. indicate if You modified the Licensed Material and + retain an indication of any previous modifications; and + + c. indicate the Licensed Material is licensed under this + Public License, and include the text of, or the URI or + hyperlink to, this Public License. + + 2. 
You may satisfy the conditions in Section 3(a)(1) in any + reasonable manner based on the medium, means, and context in + which You Share the Licensed Material. For example, it may be + reasonable to satisfy the conditions by providing a URI or + hyperlink to a resource that includes the required + information. + + 3. If requested by the Licensor, You must remove any of the + information required by Section 3(a)(1)(A) to the extent + reasonably practicable. + + b. ShareAlike. + + In addition to the conditions in Section 3(a), if You Share + Adapted Material You produce, the following conditions also apply. + + 1. The Adapter's License You apply must be a Creative Commons + license with the same License Elements, this version or + later, or a BY-SA Compatible License. + + 2. You must include the text of, or the URI or hyperlink to, the + Adapter's License You apply. You may satisfy this condition + in any reasonable manner based on the medium, means, and + context in which You Share Adapted Material. + + 3. You may not offer or impose any additional or different terms + or conditions on, or apply any Effective Technological + Measures to, Adapted Material that restrict exercise of the + rights granted under the Adapter's License You apply. + + +Section 4 -- Sui Generis Database Rights. + +Where the Licensed Rights include Sui Generis Database Rights that +apply to Your use of the Licensed Material: + + a. for the avoidance of doubt, Section 2(a)(1) grants You the right + to extract, reuse, reproduce, and Share all or a substantial + portion of the contents of the database; + + b. if You include all or a substantial portion of the database + contents in a database in which You have Sui Generis Database + Rights, then the database in which You have Sui Generis Database + Rights (but not its individual contents) is Adapted Material, + + including for purposes of Section 3(b); and + c. You must comply with the conditions in Section 3(a) if You Share + all or a substantial portion of the contents of the database. + +For the avoidance of doubt, this Section 4 supplements and does not +replace Your obligations under this Public License where the Licensed +Rights include other Copyright and Similar Rights. + + +Section 5 -- Disclaimer of Warranties and Limitation of Liability. + + a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE + EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS + AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF + ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, + IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, + WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR + PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, + ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT + KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT + ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. + + b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE + TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, + NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, + INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, + COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR + USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN + ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR + DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR + IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. + + c. 
The disclaimer of warranties and limitation of liability provided + above shall be interpreted in a manner that, to the extent + possible, most closely approximates an absolute disclaimer and + waiver of all liability. + + +Section 6 -- Term and Termination. + + a. This Public License applies for the term of the Copyright and + Similar Rights licensed here. However, if You fail to comply with + this Public License, then Your rights under this Public License + terminate automatically. + + b. Where Your right to use the Licensed Material has terminated under + Section 6(a), it reinstates: + + 1. automatically as of the date the violation is cured, provided + it is cured within 30 days of Your discovery of the + violation; or + + 2. upon express reinstatement by the Licensor. + + For the avoidance of doubt, this Section 6(b) does not affect any + right the Licensor may have to seek remedies for Your violations + of this Public License. + + c. For the avoidance of doubt, the Licensor may also offer the + Licensed Material under separate terms or conditions or stop + distributing the Licensed Material at any time; however, doing so + will not terminate this Public License. + + d. Sections 1, 5, 6, 7, and 8 survive termination of this Public + License. + + +Section 7 -- Other Terms and Conditions. + + a. The Licensor shall not be bound by any additional or different + terms or conditions communicated by You unless expressly agreed. + + b. Any arrangements, understandings, or agreements regarding the + Licensed Material not stated herein are separate from and + independent of the terms and conditions of this Public License. + + +Section 8 -- Interpretation. + + a. For the avoidance of doubt, this Public License does not, and + shall not be interpreted to, reduce, limit, restrict, or impose + conditions on any use of the Licensed Material that could lawfully + be made without permission under this Public License. + + b. To the extent possible, if any provision of this Public License is + deemed unenforceable, it shall be automatically reformed to the + minimum extent necessary to make it enforceable. If the provision + cannot be reformed, it shall be severed from this Public License + without affecting the enforceability of the remaining terms and + conditions. + + c. No term or condition of this Public License will be waived and no + failure to comply consented to unless expressly agreed to by the + Licensor. + + d. Nothing in this Public License constitutes or may be interpreted + as a limitation upon, or waiver of, any privileges and immunities + that apply to the Licensor or You, including from the legal + processes of any jurisdiction or authority. + + +======================================================================= + +Creative Commons is not a party to its public licenses. +Notwithstanding, Creative Commons may elect to apply one of its public +licenses to material it publishes and in those instances will be +considered the "Licensor." 
Except for the limited purpose of indicating +that material is shared under a Creative Commons public license or as +otherwise permitted by the Creative Commons policies published at +creativecommons.org/policies, Creative Commons does not authorize the +use of the trademark "Creative Commons" or any other trademark or logo +of Creative Commons without its prior written consent including, +without limitation, in connection with any unauthorized modifications +to any of its public licenses or any other arrangements, +understandings, or agreements concerning use of licensed material. For +the avoidance of doubt, this paragraph does not form part of the public +licenses. + +Creative Commons may be contacted at creativecommons.org. diff --git a/vendor/github.com/docker/spdystream/MAINTAINERS b/vendor/github.com/docker/spdystream/MAINTAINERS new file mode 100644 index 000000000..14e263325 --- /dev/null +++ b/vendor/github.com/docker/spdystream/MAINTAINERS @@ -0,0 +1,28 @@ +# Spdystream maintainers file +# +# This file describes who runs the docker/spdystream project and how. +# This is a living document - if you see something out of date or missing, speak up! +# +# It is structured to be consumable by both humans and programs. +# To extract its contents programmatically, use any TOML-compliant parser. +# +# This file is compiled into the MAINTAINERS file in docker/opensource. +# +[Org] + [Org."Core maintainers"] + people = [ + "dmcgowan", + ] + +[people] + +# A reference list of all people associated with the project. +# All other sections should refer to people by their canonical key +# in the people section. + + # ADD YOURSELF HERE IN ALPHABETICAL ORDER + + [people.dmcgowan] + Name = "Derek McGowan" + Email = "derek@docker.com" + GitHub = "dmcgowan" diff --git a/vendor/github.com/docker/spdystream/README.md b/vendor/github.com/docker/spdystream/README.md new file mode 100644 index 000000000..11cccd0a0 --- /dev/null +++ b/vendor/github.com/docker/spdystream/README.md @@ -0,0 +1,77 @@ +# SpdyStream + +A multiplexed stream library using spdy + +## Usage + +Client example (connecting to mirroring server without auth) + +```go +package main + +import ( + "fmt" + "github.com/docker/spdystream" + "net" + "net/http" +) + +func main() { + conn, err := net.Dial("tcp", "localhost:8080") + if err != nil { + panic(err) + } + spdyConn, err := spdystream.NewConnection(conn, false) + if err != nil { + panic(err) + } + go spdyConn.Serve(spdystream.NoOpStreamHandler) + stream, err := spdyConn.CreateStream(http.Header{}, nil, false) + if err != nil { + panic(err) + } + + stream.Wait() + + fmt.Fprint(stream, "Writing to stream") + + buf := make([]byte, 25) + stream.Read(buf) + fmt.Println(string(buf)) + + stream.Close() +} +``` + +Server example (mirroring server without auth) + +```go +package main + +import ( + "github.com/docker/spdystream" + "net" +) + +func main() { + listener, err := net.Listen("tcp", "localhost:8080") + if err != nil { + panic(err) + } + for { + conn, err := listener.Accept() + if err != nil { + panic(err) + } + spdyConn, err := spdystream.NewConnection(conn, true) + if err != nil { + panic(err) + } + go spdyConn.Serve(spdystream.MirrorStreamHandler) + } +} +``` + +## Copyright and license + +Copyright © 2014-2015 Docker, Inc. All rights reserved, except as follows. Code is released under the Apache 2.0 license. 
The README.md file, and files in the "docs" folder are licensed under the Creative Commons Attribution 4.0 International License under the terms and conditions set forth in the file "LICENSE.docs". You may obtain a duplicate copy of the same license, titled CC-BY-SA-4.0, at http://creativecommons.org/licenses/by/4.0/. diff --git a/vendor/github.com/docker/spdystream/connection.go b/vendor/github.com/docker/spdystream/connection.go new file mode 100644 index 000000000..6031a0db1 --- /dev/null +++ b/vendor/github.com/docker/spdystream/connection.go @@ -0,0 +1,958 @@ +package spdystream + +import ( + "errors" + "fmt" + "io" + "net" + "net/http" + "sync" + "time" + + "github.com/docker/spdystream/spdy" +) + +var ( + ErrInvalidStreamId = errors.New("Invalid stream id") + ErrTimeout = errors.New("Timeout occured") + ErrReset = errors.New("Stream reset") + ErrWriteClosedStream = errors.New("Write on closed stream") +) + +const ( + FRAME_WORKERS = 5 + QUEUE_SIZE = 50 +) + +type StreamHandler func(stream *Stream) + +type AuthHandler func(header http.Header, slot uint8, parent uint32) bool + +type idleAwareFramer struct { + f *spdy.Framer + conn *Connection + writeLock sync.Mutex + resetChan chan struct{} + setTimeoutLock sync.Mutex + setTimeoutChan chan time.Duration + timeout time.Duration +} + +func newIdleAwareFramer(framer *spdy.Framer) *idleAwareFramer { + iaf := &idleAwareFramer{ + f: framer, + resetChan: make(chan struct{}, 2), + // setTimeoutChan needs to be buffered to avoid deadlocks when calling setIdleTimeout at about + // the same time the connection is being closed + setTimeoutChan: make(chan time.Duration, 1), + } + return iaf +} + +func (i *idleAwareFramer) monitor() { + var ( + timer *time.Timer + expired <-chan time.Time + resetChan = i.resetChan + setTimeoutChan = i.setTimeoutChan + ) +Loop: + for { + select { + case timeout := <-i.setTimeoutChan: + i.timeout = timeout + if timeout == 0 { + if timer != nil { + timer.Stop() + } + } else { + if timer == nil { + timer = time.NewTimer(timeout) + expired = timer.C + } else { + timer.Reset(timeout) + } + } + case <-resetChan: + if timer != nil && i.timeout > 0 { + timer.Reset(i.timeout) + } + case <-expired: + i.conn.streamCond.L.Lock() + streams := i.conn.streams + i.conn.streams = make(map[spdy.StreamId]*Stream) + i.conn.streamCond.Broadcast() + i.conn.streamCond.L.Unlock() + go func() { + for _, stream := range streams { + stream.resetStream() + } + i.conn.Close() + }() + case <-i.conn.closeChan: + if timer != nil { + timer.Stop() + } + + // Start a goroutine to drain resetChan. This is needed because we've seen + // some unit tests with large numbers of goroutines get into a situation + // where resetChan fills up, at least 1 call to Write() is still trying to + // send to resetChan, the connection gets closed, and this case statement + // attempts to grab the write lock that Write() already has, causing a + // deadlock. + // + // See https://github.com/docker/spdystream/issues/49 for more details. 
+ go func() { + for _ = range resetChan { + } + }() + + go func() { + for _ = range setTimeoutChan { + } + }() + + i.writeLock.Lock() + close(resetChan) + i.resetChan = nil + i.writeLock.Unlock() + + i.setTimeoutLock.Lock() + close(i.setTimeoutChan) + i.setTimeoutChan = nil + i.setTimeoutLock.Unlock() + + break Loop + } + } + + // Drain resetChan + for _ = range resetChan { + } +} + +func (i *idleAwareFramer) WriteFrame(frame spdy.Frame) error { + i.writeLock.Lock() + defer i.writeLock.Unlock() + if i.resetChan == nil { + return io.EOF + } + err := i.f.WriteFrame(frame) + if err != nil { + return err + } + + i.resetChan <- struct{}{} + + return nil +} + +func (i *idleAwareFramer) ReadFrame() (spdy.Frame, error) { + frame, err := i.f.ReadFrame() + if err != nil { + return nil, err + } + + // resetChan should never be closed since it is only closed + // when the connection has closed its closeChan. This closure + // only occurs after all Reads have finished + // TODO (dmcgowan): refactor relationship into connection + i.resetChan <- struct{}{} + + return frame, nil +} + +func (i *idleAwareFramer) setIdleTimeout(timeout time.Duration) { + i.setTimeoutLock.Lock() + defer i.setTimeoutLock.Unlock() + + if i.setTimeoutChan == nil { + return + } + + i.setTimeoutChan <- timeout +} + +type Connection struct { + conn net.Conn + framer *idleAwareFramer + + closeChan chan bool + goneAway bool + lastStreamChan chan<- *Stream + goAwayTimeout time.Duration + closeTimeout time.Duration + + streamLock *sync.RWMutex + streamCond *sync.Cond + streams map[spdy.StreamId]*Stream + + nextIdLock sync.Mutex + receiveIdLock sync.Mutex + nextStreamId spdy.StreamId + receivedStreamId spdy.StreamId + + pingIdLock sync.Mutex + pingId uint32 + pingChans map[uint32]chan error + + shutdownLock sync.Mutex + shutdownChan chan error + hasShutdown bool + + // for testing https://github.com/docker/spdystream/pull/56 + dataFrameHandler func(*spdy.DataFrame) error +} + +// NewConnection creates a new spdy connection from an existing +// network connection. 
+func NewConnection(conn net.Conn, server bool) (*Connection, error) { + framer, framerErr := spdy.NewFramer(conn, conn) + if framerErr != nil { + return nil, framerErr + } + idleAwareFramer := newIdleAwareFramer(framer) + var sid spdy.StreamId + var rid spdy.StreamId + var pid uint32 + if server { + sid = 2 + rid = 1 + pid = 2 + } else { + sid = 1 + rid = 2 + pid = 1 + } + + streamLock := new(sync.RWMutex) + streamCond := sync.NewCond(streamLock) + + session := &Connection{ + conn: conn, + framer: idleAwareFramer, + + closeChan: make(chan bool), + goAwayTimeout: time.Duration(0), + closeTimeout: time.Duration(0), + + streamLock: streamLock, + streamCond: streamCond, + streams: make(map[spdy.StreamId]*Stream), + nextStreamId: sid, + receivedStreamId: rid, + + pingId: pid, + pingChans: make(map[uint32]chan error), + + shutdownChan: make(chan error), + } + session.dataFrameHandler = session.handleDataFrame + idleAwareFramer.conn = session + go idleAwareFramer.monitor() + + return session, nil +} + +// Ping sends a ping frame across the connection and +// returns the response time +func (s *Connection) Ping() (time.Duration, error) { + pid := s.pingId + s.pingIdLock.Lock() + if s.pingId > 0x7ffffffe { + s.pingId = s.pingId - 0x7ffffffe + } else { + s.pingId = s.pingId + 2 + } + s.pingIdLock.Unlock() + pingChan := make(chan error) + s.pingChans[pid] = pingChan + defer delete(s.pingChans, pid) + + frame := &spdy.PingFrame{Id: pid} + startTime := time.Now() + writeErr := s.framer.WriteFrame(frame) + if writeErr != nil { + return time.Duration(0), writeErr + } + select { + case <-s.closeChan: + return time.Duration(0), errors.New("connection closed") + case err, ok := <-pingChan: + if ok && err != nil { + return time.Duration(0), err + } + break + } + return time.Now().Sub(startTime), nil +} + +// Serve handles frames sent from the server, including reply frames +// which are needed to fully initiate connections. Both clients and servers +// should call Serve in a separate goroutine before creating streams. +func (s *Connection) Serve(newHandler StreamHandler) { + // use a WaitGroup to wait for all frames to be drained after receiving + // go-away. 
+ var wg sync.WaitGroup + + // Parition queues to ensure stream frames are handled + // by the same worker, ensuring order is maintained + frameQueues := make([]*PriorityFrameQueue, FRAME_WORKERS) + for i := 0; i < FRAME_WORKERS; i++ { + frameQueues[i] = NewPriorityFrameQueue(QUEUE_SIZE) + + // Ensure frame queue is drained when connection is closed + go func(frameQueue *PriorityFrameQueue) { + <-s.closeChan + frameQueue.Drain() + }(frameQueues[i]) + + wg.Add(1) + go func(frameQueue *PriorityFrameQueue) { + // let the WaitGroup know this worker is done + defer wg.Done() + + s.frameHandler(frameQueue, newHandler) + }(frameQueues[i]) + } + + var ( + partitionRoundRobin int + goAwayFrame *spdy.GoAwayFrame + ) +Loop: + for { + readFrame, err := s.framer.ReadFrame() + if err != nil { + if err != io.EOF { + fmt.Errorf("frame read error: %s", err) + } else { + debugMessage("(%p) EOF received", s) + } + break + } + var priority uint8 + var partition int + switch frame := readFrame.(type) { + case *spdy.SynStreamFrame: + if s.checkStreamFrame(frame) { + priority = frame.Priority + partition = int(frame.StreamId % FRAME_WORKERS) + debugMessage("(%p) Add stream frame: %d ", s, frame.StreamId) + s.addStreamFrame(frame) + } else { + debugMessage("(%p) Rejected stream frame: %d ", s, frame.StreamId) + continue + } + case *spdy.SynReplyFrame: + priority = s.getStreamPriority(frame.StreamId) + partition = int(frame.StreamId % FRAME_WORKERS) + case *spdy.DataFrame: + priority = s.getStreamPriority(frame.StreamId) + partition = int(frame.StreamId % FRAME_WORKERS) + case *spdy.RstStreamFrame: + priority = s.getStreamPriority(frame.StreamId) + partition = int(frame.StreamId % FRAME_WORKERS) + case *spdy.HeadersFrame: + priority = s.getStreamPriority(frame.StreamId) + partition = int(frame.StreamId % FRAME_WORKERS) + case *spdy.PingFrame: + priority = 0 + partition = partitionRoundRobin + partitionRoundRobin = (partitionRoundRobin + 1) % FRAME_WORKERS + case *spdy.GoAwayFrame: + // hold on to the go away frame and exit the loop + goAwayFrame = frame + break Loop + default: + priority = 7 + partition = partitionRoundRobin + partitionRoundRobin = (partitionRoundRobin + 1) % FRAME_WORKERS + } + frameQueues[partition].Push(readFrame, priority) + } + close(s.closeChan) + + // wait for all frame handler workers to indicate they've drained their queues + // before handling the go away frame + wg.Wait() + + if goAwayFrame != nil { + s.handleGoAwayFrame(goAwayFrame) + } + + // now it's safe to close remote channels and empty s.streams + s.streamCond.L.Lock() + // notify streams that they're now closed, which will + // unblock any stream Read() calls + for _, stream := range s.streams { + stream.closeRemoteChannels() + } + s.streams = make(map[spdy.StreamId]*Stream) + s.streamCond.Broadcast() + s.streamCond.L.Unlock() +} + +func (s *Connection) frameHandler(frameQueue *PriorityFrameQueue, newHandler StreamHandler) { + for { + popFrame := frameQueue.Pop() + if popFrame == nil { + return + } + + var frameErr error + switch frame := popFrame.(type) { + case *spdy.SynStreamFrame: + frameErr = s.handleStreamFrame(frame, newHandler) + case *spdy.SynReplyFrame: + frameErr = s.handleReplyFrame(frame) + case *spdy.DataFrame: + frameErr = s.dataFrameHandler(frame) + case *spdy.RstStreamFrame: + frameErr = s.handleResetFrame(frame) + case *spdy.HeadersFrame: + frameErr = s.handleHeaderFrame(frame) + case *spdy.PingFrame: + frameErr = s.handlePingFrame(frame) + case *spdy.GoAwayFrame: + frameErr = s.handleGoAwayFrame(frame) + 
default: + frameErr = fmt.Errorf("unhandled frame type: %T", frame) + } + + if frameErr != nil { + fmt.Errorf("frame handling error: %s", frameErr) + } + } +} + +func (s *Connection) getStreamPriority(streamId spdy.StreamId) uint8 { + stream, streamOk := s.getStream(streamId) + if !streamOk { + return 7 + } + return stream.priority +} + +func (s *Connection) addStreamFrame(frame *spdy.SynStreamFrame) { + var parent *Stream + if frame.AssociatedToStreamId != spdy.StreamId(0) { + parent, _ = s.getStream(frame.AssociatedToStreamId) + } + + stream := &Stream{ + streamId: frame.StreamId, + parent: parent, + conn: s, + startChan: make(chan error), + headers: frame.Headers, + finished: (frame.CFHeader.Flags & spdy.ControlFlagUnidirectional) != 0x00, + replyCond: sync.NewCond(new(sync.Mutex)), + dataChan: make(chan []byte), + headerChan: make(chan http.Header), + closeChan: make(chan bool), + } + if frame.CFHeader.Flags&spdy.ControlFlagFin != 0x00 { + stream.closeRemoteChannels() + } + + s.addStream(stream) +} + +// checkStreamFrame checks to see if a stream frame is allowed. +// If the stream is invalid, then a reset frame with protocol error +// will be returned. +func (s *Connection) checkStreamFrame(frame *spdy.SynStreamFrame) bool { + s.receiveIdLock.Lock() + defer s.receiveIdLock.Unlock() + if s.goneAway { + return false + } + validationErr := s.validateStreamId(frame.StreamId) + if validationErr != nil { + go func() { + resetErr := s.sendResetFrame(spdy.ProtocolError, frame.StreamId) + if resetErr != nil { + fmt.Errorf("reset error: %s", resetErr) + } + }() + return false + } + return true +} + +func (s *Connection) handleStreamFrame(frame *spdy.SynStreamFrame, newHandler StreamHandler) error { + stream, ok := s.getStream(frame.StreamId) + if !ok { + return fmt.Errorf("Missing stream: %d", frame.StreamId) + } + + newHandler(stream) + + return nil +} + +func (s *Connection) handleReplyFrame(frame *spdy.SynReplyFrame) error { + debugMessage("(%p) Reply frame received for %d", s, frame.StreamId) + stream, streamOk := s.getStream(frame.StreamId) + if !streamOk { + debugMessage("Reply frame gone away for %d", frame.StreamId) + // Stream has already gone away + return nil + } + if stream.replied { + // Stream has already received reply + return nil + } + stream.replied = true + + // TODO Check for error + if (frame.CFHeader.Flags & spdy.ControlFlagFin) != 0x00 { + s.remoteStreamFinish(stream) + } + + close(stream.startChan) + + return nil +} + +func (s *Connection) handleResetFrame(frame *spdy.RstStreamFrame) error { + stream, streamOk := s.getStream(frame.StreamId) + if !streamOk { + // Stream has already been removed + return nil + } + s.removeStream(stream) + stream.closeRemoteChannels() + + if !stream.replied { + stream.replied = true + stream.startChan <- ErrReset + close(stream.startChan) + } + + stream.finishLock.Lock() + stream.finished = true + stream.finishLock.Unlock() + + return nil +} + +func (s *Connection) handleHeaderFrame(frame *spdy.HeadersFrame) error { + stream, streamOk := s.getStream(frame.StreamId) + if !streamOk { + // Stream has already gone away + return nil + } + if !stream.replied { + // No reply received...Protocol error? + return nil + } + + // TODO limit headers while not blocking (use buffered chan or goroutine?) 
+ select { + case <-stream.closeChan: + return nil + case stream.headerChan <- frame.Headers: + } + + if (frame.CFHeader.Flags & spdy.ControlFlagFin) != 0x00 { + s.remoteStreamFinish(stream) + } + + return nil +} + +func (s *Connection) handleDataFrame(frame *spdy.DataFrame) error { + debugMessage("(%p) Data frame received for %d", s, frame.StreamId) + stream, streamOk := s.getStream(frame.StreamId) + if !streamOk { + debugMessage("(%p) Data frame gone away for %d", s, frame.StreamId) + // Stream has already gone away + return nil + } + if !stream.replied { + debugMessage("(%p) Data frame not replied %d", s, frame.StreamId) + // No reply received...Protocol error? + return nil + } + + debugMessage("(%p) (%d) Data frame handling", stream, stream.streamId) + if len(frame.Data) > 0 { + stream.dataLock.RLock() + select { + case <-stream.closeChan: + debugMessage("(%p) (%d) Data frame not sent (stream shut down)", stream, stream.streamId) + case stream.dataChan <- frame.Data: + debugMessage("(%p) (%d) Data frame sent", stream, stream.streamId) + } + stream.dataLock.RUnlock() + } + if (frame.Flags & spdy.DataFlagFin) != 0x00 { + s.remoteStreamFinish(stream) + } + return nil +} + +func (s *Connection) handlePingFrame(frame *spdy.PingFrame) error { + if s.pingId&0x01 != frame.Id&0x01 { + return s.framer.WriteFrame(frame) + } + pingChan, pingOk := s.pingChans[frame.Id] + if pingOk { + close(pingChan) + } + return nil +} + +func (s *Connection) handleGoAwayFrame(frame *spdy.GoAwayFrame) error { + debugMessage("(%p) Go away received", s) + s.receiveIdLock.Lock() + if s.goneAway { + s.receiveIdLock.Unlock() + return nil + } + s.goneAway = true + s.receiveIdLock.Unlock() + + if s.lastStreamChan != nil { + stream, _ := s.getStream(frame.LastGoodStreamId) + go func() { + s.lastStreamChan <- stream + }() + } + + // Do not block frame handler waiting for closure + go s.shutdown(s.goAwayTimeout) + + return nil +} + +func (s *Connection) remoteStreamFinish(stream *Stream) { + stream.closeRemoteChannels() + + stream.finishLock.Lock() + if stream.finished { + // Stream is fully closed, cleanup + s.removeStream(stream) + } + stream.finishLock.Unlock() +} + +// CreateStream creates a new spdy stream using the parameters for +// creating the stream frame. The stream frame will be sent upon +// calling this function, however this function does not wait for +// the reply frame. If waiting for the reply is desired, use +// the stream Wait or WaitTimeout function on the stream returned +// by this function. +func (s *Connection) CreateStream(headers http.Header, parent *Stream, fin bool) (*Stream, error) { + // MUST synchronize stream creation (all the way to writing the frame) + // as stream IDs **MUST** increase monotonically. 
+ s.nextIdLock.Lock() + defer s.nextIdLock.Unlock() + + streamId := s.getNextStreamId() + if streamId == 0 { + return nil, fmt.Errorf("Unable to get new stream id") + } + + stream := &Stream{ + streamId: streamId, + parent: parent, + conn: s, + startChan: make(chan error), + headers: headers, + dataChan: make(chan []byte), + headerChan: make(chan http.Header), + closeChan: make(chan bool), + } + + debugMessage("(%p) (%p) Create stream", s, stream) + + s.addStream(stream) + + return stream, s.sendStream(stream, fin) +} + +func (s *Connection) shutdown(closeTimeout time.Duration) { + // TODO Ensure this isn't called multiple times + s.shutdownLock.Lock() + if s.hasShutdown { + s.shutdownLock.Unlock() + return + } + s.hasShutdown = true + s.shutdownLock.Unlock() + + var timeout <-chan time.Time + if closeTimeout > time.Duration(0) { + timeout = time.After(closeTimeout) + } + streamsClosed := make(chan bool) + + go func() { + s.streamCond.L.Lock() + for len(s.streams) > 0 { + debugMessage("Streams opened: %d, %#v", len(s.streams), s.streams) + s.streamCond.Wait() + } + s.streamCond.L.Unlock() + close(streamsClosed) + }() + + var err error + select { + case <-streamsClosed: + // No active streams, close should be safe + err = s.conn.Close() + case <-timeout: + // Force ungraceful close + err = s.conn.Close() + // Wait for cleanup to clear active streams + <-streamsClosed + } + + if err != nil { + duration := 10 * time.Minute + time.AfterFunc(duration, func() { + select { + case err, ok := <-s.shutdownChan: + if ok { + fmt.Errorf("Unhandled close error after %s: %s", duration, err) + } + default: + } + }) + s.shutdownChan <- err + } + close(s.shutdownChan) + + return +} + +// Closes spdy connection by sending GoAway frame and initiating shutdown +func (s *Connection) Close() error { + s.receiveIdLock.Lock() + if s.goneAway { + s.receiveIdLock.Unlock() + return nil + } + s.goneAway = true + s.receiveIdLock.Unlock() + + var lastStreamId spdy.StreamId + if s.receivedStreamId > 2 { + lastStreamId = s.receivedStreamId - 2 + } + + goAwayFrame := &spdy.GoAwayFrame{ + LastGoodStreamId: lastStreamId, + Status: spdy.GoAwayOK, + } + + err := s.framer.WriteFrame(goAwayFrame) + if err != nil { + return err + } + + go s.shutdown(s.closeTimeout) + + return nil +} + +// CloseWait closes the connection and waits for shutdown +// to finish. Note the underlying network Connection +// is not closed until the end of shutdown. +func (s *Connection) CloseWait() error { + closeErr := s.Close() + if closeErr != nil { + return closeErr + } + shutdownErr, ok := <-s.shutdownChan + if ok { + return shutdownErr + } + return nil +} + +// Wait waits for the connection to finish shutdown or for +// the wait timeout duration to expire. This needs to be +// called either after Close has been called or the GOAWAYFRAME +// has been received. If the wait timeout is 0, this function +// will block until shutdown finishes. If wait is never called +// and a shutdown error occurs, that error will be logged as an +// unhandled error. +func (s *Connection) Wait(waitTimeout time.Duration) error { + var timeout <-chan time.Time + if waitTimeout > time.Duration(0) { + timeout = time.After(waitTimeout) + } + + select { + case err, ok := <-s.shutdownChan: + if ok { + return err + } + case <-timeout: + return ErrTimeout + } + return nil +} + +// NotifyClose registers a channel to be called when the remote +// peer inidicates connection closure. The last stream to be +// received by the remote will be sent on the channel. 
The notify +// timeout will determine the duration between go away received +// and the connection being closed. +func (s *Connection) NotifyClose(c chan<- *Stream, timeout time.Duration) { + s.goAwayTimeout = timeout + s.lastStreamChan = c +} + +// SetCloseTimeout sets the amount of time close will wait for +// streams to finish before terminating the underlying network +// connection. Setting the timeout to 0 will cause close to +// wait forever, which is the default. +func (s *Connection) SetCloseTimeout(timeout time.Duration) { + s.closeTimeout = timeout +} + +// SetIdleTimeout sets the amount of time the connection may sit idle before +// it is forcefully terminated. +func (s *Connection) SetIdleTimeout(timeout time.Duration) { + s.framer.setIdleTimeout(timeout) +} + +func (s *Connection) sendHeaders(headers http.Header, stream *Stream, fin bool) error { + var flags spdy.ControlFlags + if fin { + flags = spdy.ControlFlagFin + } + + headerFrame := &spdy.HeadersFrame{ + StreamId: stream.streamId, + Headers: headers, + CFHeader: spdy.ControlFrameHeader{Flags: flags}, + } + + return s.framer.WriteFrame(headerFrame) +} + +func (s *Connection) sendReply(headers http.Header, stream *Stream, fin bool) error { + var flags spdy.ControlFlags + if fin { + flags = spdy.ControlFlagFin + } + + replyFrame := &spdy.SynReplyFrame{ + StreamId: stream.streamId, + Headers: headers, + CFHeader: spdy.ControlFrameHeader{Flags: flags}, + } + + return s.framer.WriteFrame(replyFrame) +} + +func (s *Connection) sendResetFrame(status spdy.RstStreamStatus, streamId spdy.StreamId) error { + resetFrame := &spdy.RstStreamFrame{ + StreamId: streamId, + Status: status, + } + + return s.framer.WriteFrame(resetFrame) +} + +func (s *Connection) sendReset(status spdy.RstStreamStatus, stream *Stream) error { + return s.sendResetFrame(status, stream.streamId) +} + +func (s *Connection) sendStream(stream *Stream, fin bool) error { + var flags spdy.ControlFlags + if fin { + flags = spdy.ControlFlagFin + stream.finished = true + } + + var parentId spdy.StreamId + if stream.parent != nil { + parentId = stream.parent.streamId + } + + streamFrame := &spdy.SynStreamFrame{ + StreamId: spdy.StreamId(stream.streamId), + AssociatedToStreamId: spdy.StreamId(parentId), + Headers: stream.headers, + CFHeader: spdy.ControlFrameHeader{Flags: flags}, + } + + return s.framer.WriteFrame(streamFrame) +} + +// getNextStreamId returns the next sequential id +// every call should produce a unique value or an error +func (s *Connection) getNextStreamId() spdy.StreamId { + sid := s.nextStreamId + if sid > 0x7fffffff { + return 0 + } + s.nextStreamId = s.nextStreamId + 2 + return sid +} + +// PeekNextStreamId returns the next sequential id and keeps the next id untouched +func (s *Connection) PeekNextStreamId() spdy.StreamId { + sid := s.nextStreamId + return sid +} + +func (s *Connection) validateStreamId(rid spdy.StreamId) error { + if rid > 0x7fffffff || rid < s.receivedStreamId { + return ErrInvalidStreamId + } + s.receivedStreamId = rid + 2 + return nil +} + +func (s *Connection) addStream(stream *Stream) { + s.streamCond.L.Lock() + s.streams[stream.streamId] = stream + debugMessage("(%p) (%p) Stream added, broadcasting: %d", s, stream, stream.streamId) + s.streamCond.Broadcast() + s.streamCond.L.Unlock() +} + +func (s *Connection) removeStream(stream *Stream) { + s.streamCond.L.Lock() + delete(s.streams, stream.streamId) + debugMessage("(%p) (%p) Stream removed, broadcasting: %d", s, stream, stream.streamId) + s.streamCond.Broadcast() + 
s.streamCond.L.Unlock() +} + +func (s *Connection) getStream(streamId spdy.StreamId) (stream *Stream, ok bool) { + s.streamLock.RLock() + stream, ok = s.streams[streamId] + s.streamLock.RUnlock() + return +} + +// FindStream looks up the given stream id and either waits for the +// stream to be found or returns nil if the stream id is no longer +// valid. +func (s *Connection) FindStream(streamId uint32) *Stream { + var stream *Stream + var ok bool + s.streamCond.L.Lock() + stream, ok = s.streams[spdy.StreamId(streamId)] + debugMessage("(%p) Found stream %d? %t", s, spdy.StreamId(streamId), ok) + for !ok && streamId >= uint32(s.receivedStreamId) { + s.streamCond.Wait() + stream, ok = s.streams[spdy.StreamId(streamId)] + } + s.streamCond.L.Unlock() + return stream +} + +func (s *Connection) CloseChan() <-chan bool { + return s.closeChan +} diff --git a/vendor/github.com/docker/spdystream/handlers.go b/vendor/github.com/docker/spdystream/handlers.go new file mode 100644 index 000000000..b59fa5fdc --- /dev/null +++ b/vendor/github.com/docker/spdystream/handlers.go @@ -0,0 +1,38 @@ +package spdystream + +import ( + "io" + "net/http" +) + +// MirrorStreamHandler mirrors all streams. +func MirrorStreamHandler(stream *Stream) { + replyErr := stream.SendReply(http.Header{}, false) + if replyErr != nil { + return + } + + go func() { + io.Copy(stream, stream) + stream.Close() + }() + go func() { + for { + header, receiveErr := stream.ReceiveHeader() + if receiveErr != nil { + return + } + sendErr := stream.SendHeader(header, false) + if sendErr != nil { + return + } + } + }() +} + +// NoopStreamHandler does nothing when stream connects, most +// likely used with RejectAuthHandler which will not allow any +// streams to make it to the stream handler. +func NoOpStreamHandler(stream *Stream) { + stream.SendReply(http.Header{}, false) +} diff --git a/vendor/github.com/docker/spdystream/priority.go b/vendor/github.com/docker/spdystream/priority.go new file mode 100644 index 000000000..fc8582b5c --- /dev/null +++ b/vendor/github.com/docker/spdystream/priority.go @@ -0,0 +1,98 @@ +package spdystream + +import ( + "container/heap" + "sync" + + "github.com/docker/spdystream/spdy" +) + +type prioritizedFrame struct { + frame spdy.Frame + priority uint8 + insertId uint64 +} + +type frameQueue []*prioritizedFrame + +func (fq frameQueue) Len() int { + return len(fq) +} + +func (fq frameQueue) Less(i, j int) bool { + if fq[i].priority == fq[j].priority { + return fq[i].insertId < fq[j].insertId + } + return fq[i].priority < fq[j].priority +} + +func (fq frameQueue) Swap(i, j int) { + fq[i], fq[j] = fq[j], fq[i] +} + +func (fq *frameQueue) Push(x interface{}) { + *fq = append(*fq, x.(*prioritizedFrame)) +} + +func (fq *frameQueue) Pop() interface{} { + old := *fq + n := len(old) + *fq = old[0 : n-1] + return old[n-1] +} + +type PriorityFrameQueue struct { + queue *frameQueue + c *sync.Cond + size int + nextInsertId uint64 + drain bool +} + +func NewPriorityFrameQueue(size int) *PriorityFrameQueue { + queue := make(frameQueue, 0, size) + heap.Init(&queue) + + return &PriorityFrameQueue{ + queue: &queue, + size: size, + c: sync.NewCond(&sync.Mutex{}), + } +} + +func (q *PriorityFrameQueue) Push(frame spdy.Frame, priority uint8) { + q.c.L.Lock() + defer q.c.L.Unlock() + for q.queue.Len() >= q.size { + q.c.Wait() + } + pFrame := &prioritizedFrame{ + frame: frame, + priority: priority, + insertId: q.nextInsertId, + } + q.nextInsertId = q.nextInsertId + 1 + heap.Push(q.queue, pFrame) + q.c.Signal() +} + +func (q 
*PriorityFrameQueue) Pop() spdy.Frame { + q.c.L.Lock() + defer q.c.L.Unlock() + for q.queue.Len() == 0 { + if q.drain { + return nil + } + q.c.Wait() + } + frame := heap.Pop(q.queue).(*prioritizedFrame).frame + q.c.Signal() + return frame +} + +func (q *PriorityFrameQueue) Drain() { + q.c.L.Lock() + defer q.c.L.Unlock() + q.drain = true + q.c.Broadcast() +} diff --git a/vendor/github.com/docker/spdystream/spdy/dictionary.go b/vendor/github.com/docker/spdystream/spdy/dictionary.go new file mode 100644 index 000000000..5a5ff0e14 --- /dev/null +++ b/vendor/github.com/docker/spdystream/spdy/dictionary.go @@ -0,0 +1,187 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package spdy + +// headerDictionary is the dictionary sent to the zlib compressor/decompressor. +var headerDictionary = []byte{ + 0x00, 0x00, 0x00, 0x07, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x00, 0x00, 0x00, 0x04, 0x68, + 0x65, 0x61, 0x64, 0x00, 0x00, 0x00, 0x04, 0x70, + 0x6f, 0x73, 0x74, 0x00, 0x00, 0x00, 0x03, 0x70, + 0x75, 0x74, 0x00, 0x00, 0x00, 0x06, 0x64, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x00, 0x00, 0x00, 0x05, + 0x74, 0x72, 0x61, 0x63, 0x65, 0x00, 0x00, 0x00, + 0x06, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x00, + 0x00, 0x00, 0x0e, 0x61, 0x63, 0x63, 0x65, 0x70, + 0x74, 0x2d, 0x63, 0x68, 0x61, 0x72, 0x73, 0x65, + 0x74, 0x00, 0x00, 0x00, 0x0f, 0x61, 0x63, 0x63, + 0x65, 0x70, 0x74, 0x2d, 0x65, 0x6e, 0x63, 0x6f, + 0x64, 0x69, 0x6e, 0x67, 0x00, 0x00, 0x00, 0x0f, + 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x2d, 0x6c, + 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x00, + 0x00, 0x00, 0x0d, 0x61, 0x63, 0x63, 0x65, 0x70, + 0x74, 0x2d, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, + 0x00, 0x00, 0x00, 0x03, 0x61, 0x67, 0x65, 0x00, + 0x00, 0x00, 0x05, 0x61, 0x6c, 0x6c, 0x6f, 0x77, + 0x00, 0x00, 0x00, 0x0d, 0x61, 0x75, 0x74, 0x68, + 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x00, 0x00, 0x00, 0x0d, 0x63, 0x61, 0x63, + 0x68, 0x65, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, + 0x6f, 0x6c, 0x00, 0x00, 0x00, 0x0a, 0x63, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x00, 0x00, 0x00, 0x0c, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x6e, 0x74, 0x2d, 0x62, 0x61, 0x73, 0x65, + 0x00, 0x00, 0x00, 0x10, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x6e, 0x74, 0x2d, 0x65, 0x6e, 0x63, 0x6f, + 0x64, 0x69, 0x6e, 0x67, 0x00, 0x00, 0x00, 0x10, + 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2d, + 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, + 0x00, 0x00, 0x00, 0x0e, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x6e, 0x74, 0x2d, 0x6c, 0x65, 0x6e, 0x67, + 0x74, 0x68, 0x00, 0x00, 0x00, 0x10, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2d, 0x6c, 0x6f, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00, + 0x00, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, + 0x74, 0x2d, 0x6d, 0x64, 0x35, 0x00, 0x00, 0x00, + 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, + 0x2d, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x00, 0x00, + 0x00, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, + 0x74, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x00, 0x00, + 0x00, 0x04, 0x64, 0x61, 0x74, 0x65, 0x00, 0x00, + 0x00, 0x04, 0x65, 0x74, 0x61, 0x67, 0x00, 0x00, + 0x00, 0x06, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, + 0x00, 0x00, 0x00, 0x07, 0x65, 0x78, 0x70, 0x69, + 0x72, 0x65, 0x73, 0x00, 0x00, 0x00, 0x04, 0x66, + 0x72, 0x6f, 0x6d, 0x00, 0x00, 0x00, 0x04, 0x68, + 0x6f, 0x73, 0x74, 0x00, 0x00, 0x00, 0x08, 0x69, + 0x66, 0x2d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x00, + 0x00, 0x00, 0x11, 0x69, 0x66, 0x2d, 0x6d, 0x6f, + 0x64, 0x69, 0x66, 0x69, 0x65, 
0x64, 0x2d, 0x73, + 0x69, 0x6e, 0x63, 0x65, 0x00, 0x00, 0x00, 0x0d, + 0x69, 0x66, 0x2d, 0x6e, 0x6f, 0x6e, 0x65, 0x2d, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x00, 0x00, 0x00, + 0x08, 0x69, 0x66, 0x2d, 0x72, 0x61, 0x6e, 0x67, + 0x65, 0x00, 0x00, 0x00, 0x13, 0x69, 0x66, 0x2d, + 0x75, 0x6e, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, + 0x65, 0x64, 0x2d, 0x73, 0x69, 0x6e, 0x63, 0x65, + 0x00, 0x00, 0x00, 0x0d, 0x6c, 0x61, 0x73, 0x74, + 0x2d, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, + 0x64, 0x00, 0x00, 0x00, 0x08, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00, 0x00, + 0x0c, 0x6d, 0x61, 0x78, 0x2d, 0x66, 0x6f, 0x72, + 0x77, 0x61, 0x72, 0x64, 0x73, 0x00, 0x00, 0x00, + 0x06, 0x70, 0x72, 0x61, 0x67, 0x6d, 0x61, 0x00, + 0x00, 0x00, 0x12, 0x70, 0x72, 0x6f, 0x78, 0x79, + 0x2d, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x00, 0x00, 0x00, + 0x13, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2d, 0x61, + 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00, 0x00, 0x05, + 0x72, 0x61, 0x6e, 0x67, 0x65, 0x00, 0x00, 0x00, + 0x07, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x72, + 0x00, 0x00, 0x00, 0x0b, 0x72, 0x65, 0x74, 0x72, + 0x79, 0x2d, 0x61, 0x66, 0x74, 0x65, 0x72, 0x00, + 0x00, 0x00, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x00, 0x00, 0x00, 0x02, 0x74, 0x65, 0x00, + 0x00, 0x00, 0x07, 0x74, 0x72, 0x61, 0x69, 0x6c, + 0x65, 0x72, 0x00, 0x00, 0x00, 0x11, 0x74, 0x72, + 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x2d, 0x65, + 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x00, + 0x00, 0x00, 0x07, 0x75, 0x70, 0x67, 0x72, 0x61, + 0x64, 0x65, 0x00, 0x00, 0x00, 0x0a, 0x75, 0x73, + 0x65, 0x72, 0x2d, 0x61, 0x67, 0x65, 0x6e, 0x74, + 0x00, 0x00, 0x00, 0x04, 0x76, 0x61, 0x72, 0x79, + 0x00, 0x00, 0x00, 0x03, 0x76, 0x69, 0x61, 0x00, + 0x00, 0x00, 0x07, 0x77, 0x61, 0x72, 0x6e, 0x69, + 0x6e, 0x67, 0x00, 0x00, 0x00, 0x10, 0x77, 0x77, + 0x77, 0x2d, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, + 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x00, 0x00, + 0x00, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x00, 0x00, 0x00, 0x03, 0x67, 0x65, 0x74, 0x00, + 0x00, 0x00, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x00, 0x00, 0x00, 0x06, 0x32, 0x30, 0x30, + 0x20, 0x4f, 0x4b, 0x00, 0x00, 0x00, 0x07, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x00, 0x00, + 0x00, 0x08, 0x48, 0x54, 0x54, 0x50, 0x2f, 0x31, + 0x2e, 0x31, 0x00, 0x00, 0x00, 0x03, 0x75, 0x72, + 0x6c, 0x00, 0x00, 0x00, 0x06, 0x70, 0x75, 0x62, + 0x6c, 0x69, 0x63, 0x00, 0x00, 0x00, 0x0a, 0x73, + 0x65, 0x74, 0x2d, 0x63, 0x6f, 0x6f, 0x6b, 0x69, + 0x65, 0x00, 0x00, 0x00, 0x0a, 0x6b, 0x65, 0x65, + 0x70, 0x2d, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x00, + 0x00, 0x00, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, + 0x6e, 0x31, 0x30, 0x30, 0x31, 0x30, 0x31, 0x32, + 0x30, 0x31, 0x32, 0x30, 0x32, 0x32, 0x30, 0x35, + 0x32, 0x30, 0x36, 0x33, 0x30, 0x30, 0x33, 0x30, + 0x32, 0x33, 0x30, 0x33, 0x33, 0x30, 0x34, 0x33, + 0x30, 0x35, 0x33, 0x30, 0x36, 0x33, 0x30, 0x37, + 0x34, 0x30, 0x32, 0x34, 0x30, 0x35, 0x34, 0x30, + 0x36, 0x34, 0x30, 0x37, 0x34, 0x30, 0x38, 0x34, + 0x30, 0x39, 0x34, 0x31, 0x30, 0x34, 0x31, 0x31, + 0x34, 0x31, 0x32, 0x34, 0x31, 0x33, 0x34, 0x31, + 0x34, 0x34, 0x31, 0x35, 0x34, 0x31, 0x36, 0x34, + 0x31, 0x37, 0x35, 0x30, 0x32, 0x35, 0x30, 0x34, + 0x35, 0x30, 0x35, 0x32, 0x30, 0x33, 0x20, 0x4e, + 0x6f, 0x6e, 0x2d, 0x41, 0x75, 0x74, 0x68, 0x6f, + 0x72, 0x69, 0x74, 0x61, 0x74, 0x69, 0x76, 0x65, + 0x20, 0x49, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x32, 0x30, 0x34, 0x20, + 0x4e, 0x6f, 0x20, 0x43, 0x6f, 0x6e, 0x74, 0x65, + 0x6e, 0x74, 0x33, 0x30, 0x31, 
0x20, 0x4d, 0x6f, + 0x76, 0x65, 0x64, 0x20, 0x50, 0x65, 0x72, 0x6d, + 0x61, 0x6e, 0x65, 0x6e, 0x74, 0x6c, 0x79, 0x34, + 0x30, 0x30, 0x20, 0x42, 0x61, 0x64, 0x20, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x34, 0x30, + 0x31, 0x20, 0x55, 0x6e, 0x61, 0x75, 0x74, 0x68, + 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x34, 0x30, + 0x33, 0x20, 0x46, 0x6f, 0x72, 0x62, 0x69, 0x64, + 0x64, 0x65, 0x6e, 0x34, 0x30, 0x34, 0x20, 0x4e, + 0x6f, 0x74, 0x20, 0x46, 0x6f, 0x75, 0x6e, 0x64, + 0x35, 0x30, 0x30, 0x20, 0x49, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x6c, 0x20, 0x53, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x20, 0x45, 0x72, 0x72, 0x6f, + 0x72, 0x35, 0x30, 0x31, 0x20, 0x4e, 0x6f, 0x74, + 0x20, 0x49, 0x6d, 0x70, 0x6c, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x65, 0x64, 0x35, 0x30, 0x33, 0x20, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x20, + 0x55, 0x6e, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, + 0x62, 0x6c, 0x65, 0x4a, 0x61, 0x6e, 0x20, 0x46, + 0x65, 0x62, 0x20, 0x4d, 0x61, 0x72, 0x20, 0x41, + 0x70, 0x72, 0x20, 0x4d, 0x61, 0x79, 0x20, 0x4a, + 0x75, 0x6e, 0x20, 0x4a, 0x75, 0x6c, 0x20, 0x41, + 0x75, 0x67, 0x20, 0x53, 0x65, 0x70, 0x74, 0x20, + 0x4f, 0x63, 0x74, 0x20, 0x4e, 0x6f, 0x76, 0x20, + 0x44, 0x65, 0x63, 0x20, 0x30, 0x30, 0x3a, 0x30, + 0x30, 0x3a, 0x30, 0x30, 0x20, 0x4d, 0x6f, 0x6e, + 0x2c, 0x20, 0x54, 0x75, 0x65, 0x2c, 0x20, 0x57, + 0x65, 0x64, 0x2c, 0x20, 0x54, 0x68, 0x75, 0x2c, + 0x20, 0x46, 0x72, 0x69, 0x2c, 0x20, 0x53, 0x61, + 0x74, 0x2c, 0x20, 0x53, 0x75, 0x6e, 0x2c, 0x20, + 0x47, 0x4d, 0x54, 0x63, 0x68, 0x75, 0x6e, 0x6b, + 0x65, 0x64, 0x2c, 0x74, 0x65, 0x78, 0x74, 0x2f, + 0x68, 0x74, 0x6d, 0x6c, 0x2c, 0x69, 0x6d, 0x61, + 0x67, 0x65, 0x2f, 0x70, 0x6e, 0x67, 0x2c, 0x69, + 0x6d, 0x61, 0x67, 0x65, 0x2f, 0x6a, 0x70, 0x67, + 0x2c, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x2f, 0x67, + 0x69, 0x66, 0x2c, 0x61, 0x70, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x78, + 0x6d, 0x6c, 0x2c, 0x61, 0x70, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x78, + 0x68, 0x74, 0x6d, 0x6c, 0x2b, 0x78, 0x6d, 0x6c, + 0x2c, 0x74, 0x65, 0x78, 0x74, 0x2f, 0x70, 0x6c, + 0x61, 0x69, 0x6e, 0x2c, 0x74, 0x65, 0x78, 0x74, + 0x2f, 0x6a, 0x61, 0x76, 0x61, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x2c, 0x70, 0x75, 0x62, 0x6c, + 0x69, 0x63, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, + 0x65, 0x6d, 0x61, 0x78, 0x2d, 0x61, 0x67, 0x65, + 0x3d, 0x67, 0x7a, 0x69, 0x70, 0x2c, 0x64, 0x65, + 0x66, 0x6c, 0x61, 0x74, 0x65, 0x2c, 0x73, 0x64, + 0x63, 0x68, 0x63, 0x68, 0x61, 0x72, 0x73, 0x65, + 0x74, 0x3d, 0x75, 0x74, 0x66, 0x2d, 0x38, 0x63, + 0x68, 0x61, 0x72, 0x73, 0x65, 0x74, 0x3d, 0x69, + 0x73, 0x6f, 0x2d, 0x38, 0x38, 0x35, 0x39, 0x2d, + 0x31, 0x2c, 0x75, 0x74, 0x66, 0x2d, 0x2c, 0x2a, + 0x2c, 0x65, 0x6e, 0x71, 0x3d, 0x30, 0x2e, +} diff --git a/vendor/github.com/docker/spdystream/spdy/read.go b/vendor/github.com/docker/spdystream/spdy/read.go new file mode 100644 index 000000000..9359a9501 --- /dev/null +++ b/vendor/github.com/docker/spdystream/spdy/read.go @@ -0,0 +1,348 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package spdy + +import ( + "compress/zlib" + "encoding/binary" + "io" + "net/http" + "strings" +) + +func (frame *SynStreamFrame) read(h ControlFrameHeader, f *Framer) error { + return f.readSynStreamFrame(h, frame) +} + +func (frame *SynReplyFrame) read(h ControlFrameHeader, f *Framer) error { + return f.readSynReplyFrame(h, frame) +} + +func (frame *RstStreamFrame) read(h ControlFrameHeader, f *Framer) error { + frame.CFHeader = h + if err := binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil { + return err + } + if err := binary.Read(f.r, binary.BigEndian, &frame.Status); err != nil { + return err + } + if frame.Status == 0 { + return &Error{InvalidControlFrame, frame.StreamId} + } + if frame.StreamId == 0 { + return &Error{ZeroStreamId, 0} + } + return nil +} + +func (frame *SettingsFrame) read(h ControlFrameHeader, f *Framer) error { + frame.CFHeader = h + var numSettings uint32 + if err := binary.Read(f.r, binary.BigEndian, &numSettings); err != nil { + return err + } + frame.FlagIdValues = make([]SettingsFlagIdValue, numSettings) + for i := uint32(0); i < numSettings; i++ { + if err := binary.Read(f.r, binary.BigEndian, &frame.FlagIdValues[i].Id); err != nil { + return err + } + frame.FlagIdValues[i].Flag = SettingsFlag((frame.FlagIdValues[i].Id & 0xff000000) >> 24) + frame.FlagIdValues[i].Id &= 0xffffff + if err := binary.Read(f.r, binary.BigEndian, &frame.FlagIdValues[i].Value); err != nil { + return err + } + } + return nil +} + +func (frame *PingFrame) read(h ControlFrameHeader, f *Framer) error { + frame.CFHeader = h + if err := binary.Read(f.r, binary.BigEndian, &frame.Id); err != nil { + return err + } + if frame.Id == 0 { + return &Error{ZeroStreamId, 0} + } + if frame.CFHeader.Flags != 0 { + return &Error{InvalidControlFrame, StreamId(frame.Id)} + } + return nil +} + +func (frame *GoAwayFrame) read(h ControlFrameHeader, f *Framer) error { + frame.CFHeader = h + if err := binary.Read(f.r, binary.BigEndian, &frame.LastGoodStreamId); err != nil { + return err + } + if frame.CFHeader.Flags != 0 { + return &Error{InvalidControlFrame, frame.LastGoodStreamId} + } + if frame.CFHeader.length != 8 { + return &Error{InvalidControlFrame, frame.LastGoodStreamId} + } + if err := binary.Read(f.r, binary.BigEndian, &frame.Status); err != nil { + return err + } + return nil +} + +func (frame *HeadersFrame) read(h ControlFrameHeader, f *Framer) error { + return f.readHeadersFrame(h, frame) +} + +func (frame *WindowUpdateFrame) read(h ControlFrameHeader, f *Framer) error { + frame.CFHeader = h + if err := binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil { + return err + } + if frame.CFHeader.Flags != 0 { + return &Error{InvalidControlFrame, frame.StreamId} + } + if frame.CFHeader.length != 8 { + return &Error{InvalidControlFrame, frame.StreamId} + } + if err := binary.Read(f.r, binary.BigEndian, &frame.DeltaWindowSize); err != nil { + return err + } + return nil +} + +func newControlFrame(frameType ControlFrameType) (controlFrame, error) { + ctor, ok := cframeCtor[frameType] + if !ok { + return nil, &Error{Err: InvalidControlFrame} + } + return ctor(), nil +} + +var cframeCtor = map[ControlFrameType]func() controlFrame{ + TypeSynStream: func() controlFrame { return new(SynStreamFrame) }, + TypeSynReply: func() controlFrame { return new(SynReplyFrame) }, + TypeRstStream: func() controlFrame { return new(RstStreamFrame) }, + TypeSettings: func() controlFrame { return new(SettingsFrame) }, + TypePing: func() controlFrame { return new(PingFrame) }, + TypeGoAway: 
func() controlFrame { return new(GoAwayFrame) }, + TypeHeaders: func() controlFrame { return new(HeadersFrame) }, + TypeWindowUpdate: func() controlFrame { return new(WindowUpdateFrame) }, +} + +func (f *Framer) uncorkHeaderDecompressor(payloadSize int64) error { + if f.headerDecompressor != nil { + f.headerReader.N = payloadSize + return nil + } + f.headerReader = io.LimitedReader{R: f.r, N: payloadSize} + decompressor, err := zlib.NewReaderDict(&f.headerReader, []byte(headerDictionary)) + if err != nil { + return err + } + f.headerDecompressor = decompressor + return nil +} + +// ReadFrame reads SPDY encoded data and returns a decompressed Frame. +func (f *Framer) ReadFrame() (Frame, error) { + var firstWord uint32 + if err := binary.Read(f.r, binary.BigEndian, &firstWord); err != nil { + return nil, err + } + if firstWord&0x80000000 != 0 { + frameType := ControlFrameType(firstWord & 0xffff) + version := uint16(firstWord >> 16 & 0x7fff) + return f.parseControlFrame(version, frameType) + } + return f.parseDataFrame(StreamId(firstWord & 0x7fffffff)) +} + +func (f *Framer) parseControlFrame(version uint16, frameType ControlFrameType) (Frame, error) { + var length uint32 + if err := binary.Read(f.r, binary.BigEndian, &length); err != nil { + return nil, err + } + flags := ControlFlags((length & 0xff000000) >> 24) + length &= 0xffffff + header := ControlFrameHeader{version, frameType, flags, length} + cframe, err := newControlFrame(frameType) + if err != nil { + return nil, err + } + if err = cframe.read(header, f); err != nil { + return nil, err + } + return cframe, nil +} + +func parseHeaderValueBlock(r io.Reader, streamId StreamId) (http.Header, error) { + var numHeaders uint32 + if err := binary.Read(r, binary.BigEndian, &numHeaders); err != nil { + return nil, err + } + var e error + h := make(http.Header, int(numHeaders)) + for i := 0; i < int(numHeaders); i++ { + var length uint32 + if err := binary.Read(r, binary.BigEndian, &length); err != nil { + return nil, err + } + nameBytes := make([]byte, length) + if _, err := io.ReadFull(r, nameBytes); err != nil { + return nil, err + } + name := string(nameBytes) + if name != strings.ToLower(name) { + e = &Error{UnlowercasedHeaderName, streamId} + name = strings.ToLower(name) + } + if h[name] != nil { + e = &Error{DuplicateHeaders, streamId} + } + if err := binary.Read(r, binary.BigEndian, &length); err != nil { + return nil, err + } + value := make([]byte, length) + if _, err := io.ReadFull(r, value); err != nil { + return nil, err + } + valueList := strings.Split(string(value), headerValueSeparator) + for _, v := range valueList { + h.Add(name, v) + } + } + if e != nil { + return h, e + } + return h, nil +} + +func (f *Framer) readSynStreamFrame(h ControlFrameHeader, frame *SynStreamFrame) error { + frame.CFHeader = h + var err error + if err = binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil { + return err + } + if err = binary.Read(f.r, binary.BigEndian, &frame.AssociatedToStreamId); err != nil { + return err + } + if err = binary.Read(f.r, binary.BigEndian, &frame.Priority); err != nil { + return err + } + frame.Priority >>= 5 + if err = binary.Read(f.r, binary.BigEndian, &frame.Slot); err != nil { + return err + } + reader := f.r + if !f.headerCompressionDisabled { + err := f.uncorkHeaderDecompressor(int64(h.length - 10)) + if err != nil { + return err + } + reader = f.headerDecompressor + } + frame.Headers, err = parseHeaderValueBlock(reader, frame.StreamId) + if !f.headerCompressionDisabled && (err == io.EOF && 
f.headerReader.N == 0 || f.headerReader.N != 0) { + err = &Error{WrongCompressedPayloadSize, 0} + } + if err != nil { + return err + } + for h := range frame.Headers { + if invalidReqHeaders[h] { + return &Error{InvalidHeaderPresent, frame.StreamId} + } + } + if frame.StreamId == 0 { + return &Error{ZeroStreamId, 0} + } + return nil +} + +func (f *Framer) readSynReplyFrame(h ControlFrameHeader, frame *SynReplyFrame) error { + frame.CFHeader = h + var err error + if err = binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil { + return err + } + reader := f.r + if !f.headerCompressionDisabled { + err := f.uncorkHeaderDecompressor(int64(h.length - 4)) + if err != nil { + return err + } + reader = f.headerDecompressor + } + frame.Headers, err = parseHeaderValueBlock(reader, frame.StreamId) + if !f.headerCompressionDisabled && (err == io.EOF && f.headerReader.N == 0 || f.headerReader.N != 0) { + err = &Error{WrongCompressedPayloadSize, 0} + } + if err != nil { + return err + } + for h := range frame.Headers { + if invalidRespHeaders[h] { + return &Error{InvalidHeaderPresent, frame.StreamId} + } + } + if frame.StreamId == 0 { + return &Error{ZeroStreamId, 0} + } + return nil +} + +func (f *Framer) readHeadersFrame(h ControlFrameHeader, frame *HeadersFrame) error { + frame.CFHeader = h + var err error + if err = binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil { + return err + } + reader := f.r + if !f.headerCompressionDisabled { + err := f.uncorkHeaderDecompressor(int64(h.length - 4)) + if err != nil { + return err + } + reader = f.headerDecompressor + } + frame.Headers, err = parseHeaderValueBlock(reader, frame.StreamId) + if !f.headerCompressionDisabled && (err == io.EOF && f.headerReader.N == 0 || f.headerReader.N != 0) { + err = &Error{WrongCompressedPayloadSize, 0} + } + if err != nil { + return err + } + var invalidHeaders map[string]bool + if frame.StreamId%2 == 0 { + invalidHeaders = invalidReqHeaders + } else { + invalidHeaders = invalidRespHeaders + } + for h := range frame.Headers { + if invalidHeaders[h] { + return &Error{InvalidHeaderPresent, frame.StreamId} + } + } + if frame.StreamId == 0 { + return &Error{ZeroStreamId, 0} + } + return nil +} + +func (f *Framer) parseDataFrame(streamId StreamId) (*DataFrame, error) { + var length uint32 + if err := binary.Read(f.r, binary.BigEndian, &length); err != nil { + return nil, err + } + var frame DataFrame + frame.StreamId = streamId + frame.Flags = DataFlags(length >> 24) + length &= 0xffffff + frame.Data = make([]byte, length) + if _, err := io.ReadFull(f.r, frame.Data); err != nil { + return nil, err + } + if frame.StreamId == 0 { + return nil, &Error{ZeroStreamId, 0} + } + return &frame, nil +} diff --git a/vendor/github.com/docker/spdystream/spdy/types.go b/vendor/github.com/docker/spdystream/spdy/types.go new file mode 100644 index 000000000..7b6ee9c6f --- /dev/null +++ b/vendor/github.com/docker/spdystream/spdy/types.go @@ -0,0 +1,275 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package spdy implements the SPDY protocol (currently SPDY/3), described in +// http://www.chromium.org/spdy/spdy-protocol/spdy-protocol-draft3. +package spdy + +import ( + "bytes" + "compress/zlib" + "io" + "net/http" +) + +// Version is the protocol version number that this package implements. +const Version = 3 + +// ControlFrameType stores the type field in a control frame header. 
+type ControlFrameType uint16 + +const ( + TypeSynStream ControlFrameType = 0x0001 + TypeSynReply = 0x0002 + TypeRstStream = 0x0003 + TypeSettings = 0x0004 + TypePing = 0x0006 + TypeGoAway = 0x0007 + TypeHeaders = 0x0008 + TypeWindowUpdate = 0x0009 +) + +// ControlFlags are the flags that can be set on a control frame. +type ControlFlags uint8 + +const ( + ControlFlagFin ControlFlags = 0x01 + ControlFlagUnidirectional = 0x02 + ControlFlagSettingsClearSettings = 0x01 +) + +// DataFlags are the flags that can be set on a data frame. +type DataFlags uint8 + +const ( + DataFlagFin DataFlags = 0x01 +) + +// MaxDataLength is the maximum number of bytes that can be stored in one frame. +const MaxDataLength = 1<<24 - 1 + +// headerValueSepator separates multiple header values. +const headerValueSeparator = "\x00" + +// Frame is a single SPDY frame in its unpacked in-memory representation. Use +// Framer to read and write it. +type Frame interface { + write(f *Framer) error +} + +// ControlFrameHeader contains all the fields in a control frame header, +// in its unpacked in-memory representation. +type ControlFrameHeader struct { + // Note, high bit is the "Control" bit. + version uint16 // spdy version number + frameType ControlFrameType + Flags ControlFlags + length uint32 // length of data field +} + +type controlFrame interface { + Frame + read(h ControlFrameHeader, f *Framer) error +} + +// StreamId represents a 31-bit value identifying the stream. +type StreamId uint32 + +// SynStreamFrame is the unpacked, in-memory representation of a SYN_STREAM +// frame. +type SynStreamFrame struct { + CFHeader ControlFrameHeader + StreamId StreamId + AssociatedToStreamId StreamId // stream id for a stream which this stream is associated to + Priority uint8 // priority of this frame (3-bit) + Slot uint8 // index in the server's credential vector of the client certificate + Headers http.Header +} + +// SynReplyFrame is the unpacked, in-memory representation of a SYN_REPLY frame. +type SynReplyFrame struct { + CFHeader ControlFrameHeader + StreamId StreamId + Headers http.Header +} + +// RstStreamStatus represents the status that led to a RST_STREAM. +type RstStreamStatus uint32 + +const ( + ProtocolError RstStreamStatus = iota + 1 + InvalidStream + RefusedStream + UnsupportedVersion + Cancel + InternalError + FlowControlError + StreamInUse + StreamAlreadyClosed + InvalidCredentials + FrameTooLarge +) + +// RstStreamFrame is the unpacked, in-memory representation of a RST_STREAM +// frame. +type RstStreamFrame struct { + CFHeader ControlFrameHeader + StreamId StreamId + Status RstStreamStatus +} + +// SettingsFlag represents a flag in a SETTINGS frame. +type SettingsFlag uint8 + +const ( + FlagSettingsPersistValue SettingsFlag = 0x1 + FlagSettingsPersisted = 0x2 +) + +// SettingsFlag represents the id of an id/value pair in a SETTINGS frame. +type SettingsId uint32 + +const ( + SettingsUploadBandwidth SettingsId = iota + 1 + SettingsDownloadBandwidth + SettingsRoundTripTime + SettingsMaxConcurrentStreams + SettingsCurrentCwnd + SettingsDownloadRetransRate + SettingsInitialWindowSize + SettingsClientCretificateVectorSize +) + +// SettingsFlagIdValue is the unpacked, in-memory representation of the +// combined flag/id/value for a setting in a SETTINGS frame. +type SettingsFlagIdValue struct { + Flag SettingsFlag + Id SettingsId + Value uint32 +} + +// SettingsFrame is the unpacked, in-memory representation of a SPDY +// SETTINGS frame. 
+type SettingsFrame struct { + CFHeader ControlFrameHeader + FlagIdValues []SettingsFlagIdValue +} + +// PingFrame is the unpacked, in-memory representation of a PING frame. +type PingFrame struct { + CFHeader ControlFrameHeader + Id uint32 // unique id for this ping, from server is even, from client is odd. +} + +// GoAwayStatus represents the status in a GoAwayFrame. +type GoAwayStatus uint32 + +const ( + GoAwayOK GoAwayStatus = iota + GoAwayProtocolError + GoAwayInternalError +) + +// GoAwayFrame is the unpacked, in-memory representation of a GOAWAY frame. +type GoAwayFrame struct { + CFHeader ControlFrameHeader + LastGoodStreamId StreamId // last stream id which was accepted by sender + Status GoAwayStatus +} + +// HeadersFrame is the unpacked, in-memory representation of a HEADERS frame. +type HeadersFrame struct { + CFHeader ControlFrameHeader + StreamId StreamId + Headers http.Header +} + +// WindowUpdateFrame is the unpacked, in-memory representation of a +// WINDOW_UPDATE frame. +type WindowUpdateFrame struct { + CFHeader ControlFrameHeader + StreamId StreamId + DeltaWindowSize uint32 // additional number of bytes to existing window size +} + +// TODO: Implement credential frame and related methods. + +// DataFrame is the unpacked, in-memory representation of a DATA frame. +type DataFrame struct { + // Note, high bit is the "Control" bit. Should be 0 for data frames. + StreamId StreamId + Flags DataFlags + Data []byte // payload data of this frame +} + +// A SPDY specific error. +type ErrorCode string + +const ( + UnlowercasedHeaderName ErrorCode = "header was not lowercased" + DuplicateHeaders = "multiple headers with same name" + WrongCompressedPayloadSize = "compressed payload size was incorrect" + UnknownFrameType = "unknown frame type" + InvalidControlFrame = "invalid control frame" + InvalidDataFrame = "invalid data frame" + InvalidHeaderPresent = "frame contained invalid header" + ZeroStreamId = "stream id zero is disallowed" +) + +// Error contains both the type of error and additional values. StreamId is 0 +// if Error is not associated with a stream. +type Error struct { + Err ErrorCode + StreamId StreamId +} + +func (e *Error) Error() string { + return string(e.Err) +} + +var invalidReqHeaders = map[string]bool{ + "Connection": true, + "Host": true, + "Keep-Alive": true, + "Proxy-Connection": true, + "Transfer-Encoding": true, +} + +var invalidRespHeaders = map[string]bool{ + "Connection": true, + "Keep-Alive": true, + "Proxy-Connection": true, + "Transfer-Encoding": true, +} + +// Framer handles serializing/deserializing SPDY frames, including compressing/ +// decompressing payloads. +type Framer struct { + headerCompressionDisabled bool + w io.Writer + headerBuf *bytes.Buffer + headerCompressor *zlib.Writer + r io.Reader + headerReader io.LimitedReader + headerDecompressor io.ReadCloser +} + +// NewFramer allocates a new Framer for a given SPDY connection, represented by +// a io.Writer and io.Reader. Note that Framer will read and write individual fields +// from/to the Reader and Writer, so the caller should pass in an appropriately +// buffered implementation to optimize performance. 
+func NewFramer(w io.Writer, r io.Reader) (*Framer, error) { + compressBuf := new(bytes.Buffer) + compressor, err := zlib.NewWriterLevelDict(compressBuf, zlib.BestCompression, []byte(headerDictionary)) + if err != nil { + return nil, err + } + framer := &Framer{ + w: w, + headerBuf: compressBuf, + headerCompressor: compressor, + r: r, + } + return framer, nil +} diff --git a/vendor/github.com/docker/spdystream/spdy/write.go b/vendor/github.com/docker/spdystream/spdy/write.go new file mode 100644 index 000000000..b212f66a2 --- /dev/null +++ b/vendor/github.com/docker/spdystream/spdy/write.go @@ -0,0 +1,318 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package spdy + +import ( + "encoding/binary" + "io" + "net/http" + "strings" +) + +func (frame *SynStreamFrame) write(f *Framer) error { + return f.writeSynStreamFrame(frame) +} + +func (frame *SynReplyFrame) write(f *Framer) error { + return f.writeSynReplyFrame(frame) +} + +func (frame *RstStreamFrame) write(f *Framer) (err error) { + if frame.StreamId == 0 { + return &Error{ZeroStreamId, 0} + } + frame.CFHeader.version = Version + frame.CFHeader.frameType = TypeRstStream + frame.CFHeader.Flags = 0 + frame.CFHeader.length = 8 + + // Serialize frame to Writer. + if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil { + return + } + if frame.Status == 0 { + return &Error{InvalidControlFrame, frame.StreamId} + } + if err = binary.Write(f.w, binary.BigEndian, frame.Status); err != nil { + return + } + return +} + +func (frame *SettingsFrame) write(f *Framer) (err error) { + frame.CFHeader.version = Version + frame.CFHeader.frameType = TypeSettings + frame.CFHeader.length = uint32(len(frame.FlagIdValues)*8 + 4) + + // Serialize frame to Writer. + if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, uint32(len(frame.FlagIdValues))); err != nil { + return + } + for _, flagIdValue := range frame.FlagIdValues { + flagId := uint32(flagIdValue.Flag)<<24 | uint32(flagIdValue.Id) + if err = binary.Write(f.w, binary.BigEndian, flagId); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, flagIdValue.Value); err != nil { + return + } + } + return +} + +func (frame *PingFrame) write(f *Framer) (err error) { + if frame.Id == 0 { + return &Error{ZeroStreamId, 0} + } + frame.CFHeader.version = Version + frame.CFHeader.frameType = TypePing + frame.CFHeader.Flags = 0 + frame.CFHeader.length = 4 + + // Serialize frame to Writer. + if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, frame.Id); err != nil { + return + } + return +} + +func (frame *GoAwayFrame) write(f *Framer) (err error) { + frame.CFHeader.version = Version + frame.CFHeader.frameType = TypeGoAway + frame.CFHeader.Flags = 0 + frame.CFHeader.length = 8 + + // Serialize frame to Writer. 
+ if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, frame.LastGoodStreamId); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, frame.Status); err != nil { + return + } + return nil +} + +func (frame *HeadersFrame) write(f *Framer) error { + return f.writeHeadersFrame(frame) +} + +func (frame *WindowUpdateFrame) write(f *Framer) (err error) { + frame.CFHeader.version = Version + frame.CFHeader.frameType = TypeWindowUpdate + frame.CFHeader.Flags = 0 + frame.CFHeader.length = 8 + + // Serialize frame to Writer. + if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, frame.DeltaWindowSize); err != nil { + return + } + return nil +} + +func (frame *DataFrame) write(f *Framer) error { + return f.writeDataFrame(frame) +} + +// WriteFrame writes a frame. +func (f *Framer) WriteFrame(frame Frame) error { + return frame.write(f) +} + +func writeControlFrameHeader(w io.Writer, h ControlFrameHeader) error { + if err := binary.Write(w, binary.BigEndian, 0x8000|h.version); err != nil { + return err + } + if err := binary.Write(w, binary.BigEndian, h.frameType); err != nil { + return err + } + flagsAndLength := uint32(h.Flags)<<24 | h.length + if err := binary.Write(w, binary.BigEndian, flagsAndLength); err != nil { + return err + } + return nil +} + +func writeHeaderValueBlock(w io.Writer, h http.Header) (n int, err error) { + n = 0 + if err = binary.Write(w, binary.BigEndian, uint32(len(h))); err != nil { + return + } + n += 2 + for name, values := range h { + if err = binary.Write(w, binary.BigEndian, uint32(len(name))); err != nil { + return + } + n += 2 + name = strings.ToLower(name) + if _, err = io.WriteString(w, name); err != nil { + return + } + n += len(name) + v := strings.Join(values, headerValueSeparator) + if err = binary.Write(w, binary.BigEndian, uint32(len(v))); err != nil { + return + } + n += 2 + if _, err = io.WriteString(w, v); err != nil { + return + } + n += len(v) + } + return +} + +func (f *Framer) writeSynStreamFrame(frame *SynStreamFrame) (err error) { + if frame.StreamId == 0 { + return &Error{ZeroStreamId, 0} + } + // Marshal the headers. + var writer io.Writer = f.headerBuf + if !f.headerCompressionDisabled { + writer = f.headerCompressor + } + if _, err = writeHeaderValueBlock(writer, frame.Headers); err != nil { + return + } + if !f.headerCompressionDisabled { + f.headerCompressor.Flush() + } + + // Set ControlFrameHeader. + frame.CFHeader.version = Version + frame.CFHeader.frameType = TypeSynStream + frame.CFHeader.length = uint32(len(f.headerBuf.Bytes()) + 10) + + // Serialize frame to Writer. 
+ if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { + return err + } + if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil { + return err + } + if err = binary.Write(f.w, binary.BigEndian, frame.AssociatedToStreamId); err != nil { + return err + } + if err = binary.Write(f.w, binary.BigEndian, frame.Priority<<5); err != nil { + return err + } + if err = binary.Write(f.w, binary.BigEndian, frame.Slot); err != nil { + return err + } + if _, err = f.w.Write(f.headerBuf.Bytes()); err != nil { + return err + } + f.headerBuf.Reset() + return nil +} + +func (f *Framer) writeSynReplyFrame(frame *SynReplyFrame) (err error) { + if frame.StreamId == 0 { + return &Error{ZeroStreamId, 0} + } + // Marshal the headers. + var writer io.Writer = f.headerBuf + if !f.headerCompressionDisabled { + writer = f.headerCompressor + } + if _, err = writeHeaderValueBlock(writer, frame.Headers); err != nil { + return + } + if !f.headerCompressionDisabled { + f.headerCompressor.Flush() + } + + // Set ControlFrameHeader. + frame.CFHeader.version = Version + frame.CFHeader.frameType = TypeSynReply + frame.CFHeader.length = uint32(len(f.headerBuf.Bytes()) + 4) + + // Serialize frame to Writer. + if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil { + return + } + if _, err = f.w.Write(f.headerBuf.Bytes()); err != nil { + return + } + f.headerBuf.Reset() + return +} + +func (f *Framer) writeHeadersFrame(frame *HeadersFrame) (err error) { + if frame.StreamId == 0 { + return &Error{ZeroStreamId, 0} + } + // Marshal the headers. + var writer io.Writer = f.headerBuf + if !f.headerCompressionDisabled { + writer = f.headerCompressor + } + if _, err = writeHeaderValueBlock(writer, frame.Headers); err != nil { + return + } + if !f.headerCompressionDisabled { + f.headerCompressor.Flush() + } + + // Set ControlFrameHeader. + frame.CFHeader.version = Version + frame.CFHeader.frameType = TypeHeaders + frame.CFHeader.length = uint32(len(f.headerBuf.Bytes()) + 4) + + // Serialize frame to Writer. + if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil { + return + } + if _, err = f.w.Write(f.headerBuf.Bytes()); err != nil { + return + } + f.headerBuf.Reset() + return +} + +func (f *Framer) writeDataFrame(frame *DataFrame) (err error) { + if frame.StreamId == 0 { + return &Error{ZeroStreamId, 0} + } + if frame.StreamId&0x80000000 != 0 || len(frame.Data) > MaxDataLength { + return &Error{InvalidDataFrame, frame.StreamId} + } + + // Serialize frame to Writer. 
+ if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil { + return + } + flagsAndLength := uint32(frame.Flags)<<24 | uint32(len(frame.Data)) + if err = binary.Write(f.w, binary.BigEndian, flagsAndLength); err != nil { + return + } + if _, err = f.w.Write(frame.Data); err != nil { + return + } + return nil +} diff --git a/vendor/github.com/docker/spdystream/stream.go b/vendor/github.com/docker/spdystream/stream.go new file mode 100644 index 000000000..f9e9ee267 --- /dev/null +++ b/vendor/github.com/docker/spdystream/stream.go @@ -0,0 +1,327 @@ +package spdystream + +import ( + "errors" + "fmt" + "io" + "net" + "net/http" + "sync" + "time" + + "github.com/docker/spdystream/spdy" +) + +var ( + ErrUnreadPartialData = errors.New("unread partial data") +) + +type Stream struct { + streamId spdy.StreamId + parent *Stream + conn *Connection + startChan chan error + + dataLock sync.RWMutex + dataChan chan []byte + unread []byte + + priority uint8 + headers http.Header + headerChan chan http.Header + finishLock sync.Mutex + finished bool + replyCond *sync.Cond + replied bool + closeLock sync.Mutex + closeChan chan bool +} + +// WriteData writes data to stream, sending a dataframe per call +func (s *Stream) WriteData(data []byte, fin bool) error { + s.waitWriteReply() + var flags spdy.DataFlags + + if fin { + flags = spdy.DataFlagFin + s.finishLock.Lock() + if s.finished { + s.finishLock.Unlock() + return ErrWriteClosedStream + } + s.finished = true + s.finishLock.Unlock() + } + + dataFrame := &spdy.DataFrame{ + StreamId: s.streamId, + Flags: flags, + Data: data, + } + + debugMessage("(%p) (%d) Writing data frame", s, s.streamId) + return s.conn.framer.WriteFrame(dataFrame) +} + +// Write writes bytes to a stream, calling write data for each call. +func (s *Stream) Write(data []byte) (n int, err error) { + err = s.WriteData(data, false) + if err == nil { + n = len(data) + } + return +} + +// Read reads bytes from a stream, a single read will never get more +// than what is sent on a single data frame, but a multiple calls to +// read may get data from the same data frame. +func (s *Stream) Read(p []byte) (n int, err error) { + if s.unread == nil { + select { + case <-s.closeChan: + return 0, io.EOF + case read, ok := <-s.dataChan: + if !ok { + return 0, io.EOF + } + s.unread = read + } + } + n = copy(p, s.unread) + if n < len(s.unread) { + s.unread = s.unread[n:] + } else { + s.unread = nil + } + return +} + +// ReadData reads an entire data frame and returns the byte array +// from the data frame. If there is unread data from the result +// of a Read call, this function will return an ErrUnreadPartialData. +func (s *Stream) ReadData() ([]byte, error) { + debugMessage("(%p) Reading data from %d", s, s.streamId) + if s.unread != nil { + return nil, ErrUnreadPartialData + } + select { + case <-s.closeChan: + return nil, io.EOF + case read, ok := <-s.dataChan: + if !ok { + return nil, io.EOF + } + return read, nil + } +} + +func (s *Stream) waitWriteReply() { + if s.replyCond != nil { + s.replyCond.L.Lock() + for !s.replied { + s.replyCond.Wait() + } + s.replyCond.L.Unlock() + } +} + +// Wait waits for the stream to receive a reply. +func (s *Stream) Wait() error { + return s.WaitTimeout(time.Duration(0)) +} + +// WaitTimeout waits for the stream to receive a reply or for timeout. +// When the timeout is reached, ErrTimeout will be returned. 
+func (s *Stream) WaitTimeout(timeout time.Duration) error { + var timeoutChan <-chan time.Time + if timeout > time.Duration(0) { + timeoutChan = time.After(timeout) + } + + select { + case err := <-s.startChan: + if err != nil { + return err + } + break + case <-timeoutChan: + return ErrTimeout + } + return nil +} + +// Close closes the stream by sending an empty data frame with the +// finish flag set, indicating this side is finished with the stream. +func (s *Stream) Close() error { + select { + case <-s.closeChan: + // Stream is now fully closed + s.conn.removeStream(s) + default: + break + } + return s.WriteData([]byte{}, true) +} + +// Reset sends a reset frame, putting the stream into the fully closed state. +func (s *Stream) Reset() error { + s.conn.removeStream(s) + return s.resetStream() +} + +func (s *Stream) resetStream() error { + // Always call closeRemoteChannels, even if s.finished is already true. + // This makes it so that stream.Close() followed by stream.Reset() allows + // stream.Read() to unblock. + s.closeRemoteChannels() + + s.finishLock.Lock() + if s.finished { + s.finishLock.Unlock() + return nil + } + s.finished = true + s.finishLock.Unlock() + + resetFrame := &spdy.RstStreamFrame{ + StreamId: s.streamId, + Status: spdy.Cancel, + } + return s.conn.framer.WriteFrame(resetFrame) +} + +// CreateSubStream creates a stream using the current as the parent +func (s *Stream) CreateSubStream(headers http.Header, fin bool) (*Stream, error) { + return s.conn.CreateStream(headers, s, fin) +} + +// SetPriority sets the stream priority, does not affect the +// remote priority of this stream after Open has been called. +// Valid values are 0 through 7, 0 being the highest priority +// and 7 the lowest. +func (s *Stream) SetPriority(priority uint8) { + s.priority = priority +} + +// SendHeader sends a header frame across the stream +func (s *Stream) SendHeader(headers http.Header, fin bool) error { + return s.conn.sendHeaders(headers, s, fin) +} + +// SendReply sends a reply on a stream, only valid to be called once +// when handling a new stream +func (s *Stream) SendReply(headers http.Header, fin bool) error { + if s.replyCond == nil { + return errors.New("cannot reply on initiated stream") + } + s.replyCond.L.Lock() + defer s.replyCond.L.Unlock() + if s.replied { + return nil + } + + err := s.conn.sendReply(headers, s, fin) + if err != nil { + return err + } + + s.replied = true + s.replyCond.Broadcast() + return nil +} + +// Refuse sends a reset frame with the status refuse, only +// valid to be called once when handling a new stream. This +// may be used to indicate that a stream is not allowed +// when http status codes are not being used. +func (s *Stream) Refuse() error { + if s.replied { + return nil + } + s.replied = true + return s.conn.sendReset(spdy.RefusedStream, s) +} + +// Cancel sends a reset frame with the status canceled. This +// can be used at any time by the creator of the Stream to +// indicate the stream is no longer needed. +func (s *Stream) Cancel() error { + return s.conn.sendReset(spdy.Cancel, s) +} + +// ReceiveHeader receives a header sent on the other side +// of the stream. This function will block until a header +// is received or stream is closed. 
+func (s *Stream) ReceiveHeader() (http.Header, error) { + select { + case <-s.closeChan: + break + case header, ok := <-s.headerChan: + if !ok { + return nil, fmt.Errorf("header chan closed") + } + return header, nil + } + return nil, fmt.Errorf("stream closed") +} + +// Parent returns the parent stream +func (s *Stream) Parent() *Stream { + return s.parent +} + +// Headers returns the headers used to create the stream +func (s *Stream) Headers() http.Header { + return s.headers +} + +// String returns the string version of stream using the +// streamId to uniquely identify the stream +func (s *Stream) String() string { + return fmt.Sprintf("stream:%d", s.streamId) +} + +// Identifier returns a 32 bit identifier for the stream +func (s *Stream) Identifier() uint32 { + return uint32(s.streamId) +} + +// IsFinished returns whether the stream has finished +// sending data +func (s *Stream) IsFinished() bool { + return s.finished +} + +// Implement net.Conn interface + +func (s *Stream) LocalAddr() net.Addr { + return s.conn.conn.LocalAddr() +} + +func (s *Stream) RemoteAddr() net.Addr { + return s.conn.conn.RemoteAddr() +} + +// TODO set per stream values instead of connection-wide + +func (s *Stream) SetDeadline(t time.Time) error { + return s.conn.conn.SetDeadline(t) +} + +func (s *Stream) SetReadDeadline(t time.Time) error { + return s.conn.conn.SetReadDeadline(t) +} + +func (s *Stream) SetWriteDeadline(t time.Time) error { + return s.conn.conn.SetWriteDeadline(t) +} + +func (s *Stream) closeRemoteChannels() { + s.closeLock.Lock() + defer s.closeLock.Unlock() + select { + case <-s.closeChan: + default: + close(s.closeChan) + } +} diff --git a/vendor/github.com/docker/spdystream/utils.go b/vendor/github.com/docker/spdystream/utils.go new file mode 100644 index 000000000..1b2c199a4 --- /dev/null +++ b/vendor/github.com/docker/spdystream/utils.go @@ -0,0 +1,16 @@ +package spdystream + +import ( + "log" + "os" +) + +var ( + DEBUG = os.Getenv("DEBUG") +) + +func debugMessage(fmt string, args ...interface{}) { + if DEBUG != "" { + log.Printf(fmt, args...) + } +} diff --git a/vendor/github.com/evanphx/json-patch/.travis.yml b/vendor/github.com/evanphx/json-patch/.travis.yml index 2092c72c4..50e4afd19 100644 --- a/vendor/github.com/evanphx/json-patch/.travis.yml +++ b/vendor/github.com/evanphx/json-patch/.travis.yml @@ -1,8 +1,8 @@ language: go go: - - 1.8 - - 1.7 + - 1.14 + - 1.13 install: - if ! go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi @@ -11,6 +11,9 @@ install: script: - go get - go test -cover ./... + - cd ./v5 + - go get + - go test -cover ./... notifications: email: false diff --git a/vendor/github.com/evanphx/json-patch/LICENSE b/vendor/github.com/evanphx/json-patch/LICENSE index 0eb9b72d8..df76d7d77 100644 --- a/vendor/github.com/evanphx/json-patch/LICENSE +++ b/vendor/github.com/evanphx/json-patch/LICENSE @@ -6,7 +6,7 @@ modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. -* Redistributions in binary form must reproduce the above copyright notice +* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 
* Neither the name of the Evan Phoenix nor the names of its contributors diff --git a/vendor/github.com/evanphx/json-patch/README.md b/vendor/github.com/evanphx/json-patch/README.md index 9c7f87f7c..121b039db 100644 --- a/vendor/github.com/evanphx/json-patch/README.md +++ b/vendor/github.com/evanphx/json-patch/README.md @@ -1,5 +1,5 @@ # JSON-Patch -`jsonpatch` is a library which provides functionallity for both applying +`jsonpatch` is a library which provides functionality for both applying [RFC6902 JSON patches](http://tools.ietf.org/html/rfc6902) against documents, as well as for calculating & applying [RFC7396 JSON merge patches](https://tools.ietf.org/html/rfc7396). @@ -11,10 +11,11 @@ well as for calculating & applying [RFC7396 JSON merge patches](https://tools.ie **Latest and greatest**: ```bash -go get -u github.com/evanphx/json-patch +go get -u github.com/evanphx/json-patch/v5 ``` **Stable Versions**: +* Version 5: `go get -u gopkg.in/evanphx/json-patch.v5` * Version 4: `go get -u gopkg.in/evanphx/json-patch.v4` (previous versions below `v3` are unavailable) @@ -82,7 +83,7 @@ When ran, you get the following output: ```bash $ go run main.go patch document: {"height":null,"name":"Jane"} -updated tina doc: {"age":28,"name":"Jane"} +updated alternative doc: {"age":28,"name":"Jane"} ``` ## Create and apply a JSON Patch @@ -164,7 +165,7 @@ func main() { } if !jsonpatch.Equal(original, different) { - fmt.Println(`"original" is _not_ structurally equal to "similar"`) + fmt.Println(`"original" is _not_ structurally equal to "different"`) } } ``` @@ -173,7 +174,7 @@ When ran, you get the following output: ```bash $ go run main.go "original" is structurally equal to "similar" -"original" is _not_ structurally equal to "similar" +"original" is _not_ structurally equal to "different" ``` ## Combine merge patches diff --git a/vendor/github.com/evanphx/json-patch/merge.go b/vendor/github.com/evanphx/json-patch/merge.go index 6806c4c20..14e8bb5ce 100644 --- a/vendor/github.com/evanphx/json-patch/merge.go +++ b/vendor/github.com/evanphx/json-patch/merge.go @@ -307,13 +307,16 @@ func matchesValue(av, bv interface{}) bool { return true case map[string]interface{}: bt := bv.(map[string]interface{}) - for key := range at { - if !matchesValue(at[key], bt[key]) { - return false - } + if len(bt) != len(at) { + return false } for key := range bt { - if !matchesValue(at[key], bt[key]) { + av, aOK := at[key] + bv, bOK := bt[key] + if aOK != bOK { + return false + } + if !matchesValue(av, bv) { return false } } diff --git a/vendor/github.com/evanphx/json-patch/patch.go b/vendor/github.com/evanphx/json-patch/patch.go index 1b5f95e61..f185a45b2 100644 --- a/vendor/github.com/evanphx/json-patch/patch.go +++ b/vendor/github.com/evanphx/json-patch/patch.go @@ -202,6 +202,10 @@ func (n *lazyNode) equal(o *lazyNode) bool { return false } + if len(n.doc) != len(o.doc) { + return false + } + for k, v := range n.doc { ov, ok := o.doc[k] @@ -209,6 +213,10 @@ func (n *lazyNode) equal(o *lazyNode) bool { return false } + if (v == nil) != (ov == nil) { + return false + } + if v == nil && ov == nil { continue } @@ -429,14 +437,14 @@ func (d *partialArray) add(key string, val *lazyNode) error { return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) } - if SupportNegativeIndices { - if idx < -len(ary) { + if idx < 0 { + if !SupportNegativeIndices { return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) } - - if idx < 0 { - idx += len(ary) + if idx < -len(ary) { + return 
errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) } + idx += len(ary) } copy(ary[0:idx], cur[0:idx]) @@ -473,14 +481,14 @@ func (d *partialArray) remove(key string) error { return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) } - if SupportNegativeIndices { - if idx < -len(cur) { + if idx < 0 { + if !SupportNegativeIndices { return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) } - - if idx < 0 { - idx += len(cur) + if idx < -len(cur) { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) } + idx += len(cur) } ary := make([]*lazyNode, len(cur)-1) diff --git a/vendor/github.com/fsnotify/fsnotify/.editorconfig b/vendor/github.com/fsnotify/fsnotify/.editorconfig new file mode 100644 index 000000000..fad895851 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/.editorconfig @@ -0,0 +1,12 @@ +root = true + +[*.go] +indent_style = tab +indent_size = 4 +insert_final_newline = true + +[*.{yml,yaml}] +indent_style = space +indent_size = 2 +insert_final_newline = true +trim_trailing_whitespace = true diff --git a/vendor/github.com/fsnotify/fsnotify/.gitattributes b/vendor/github.com/fsnotify/fsnotify/.gitattributes new file mode 100644 index 000000000..32f1001be --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/.gitattributes @@ -0,0 +1 @@ +go.sum linguist-generated diff --git a/vendor/gopkg.in/fsnotify.v1/.gitignore b/vendor/github.com/fsnotify/fsnotify/.gitignore similarity index 100% rename from vendor/gopkg.in/fsnotify.v1/.gitignore rename to vendor/github.com/fsnotify/fsnotify/.gitignore diff --git a/vendor/github.com/fsnotify/fsnotify/.travis.yml b/vendor/github.com/fsnotify/fsnotify/.travis.yml new file mode 100644 index 000000000..a9c30165c --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/.travis.yml @@ -0,0 +1,36 @@ +sudo: false +language: go + +go: + - "stable" + - "1.11.x" + - "1.10.x" + - "1.9.x" + +matrix: + include: + - go: "stable" + env: GOLINT=true + allow_failures: + - go: tip + fast_finish: true + + +before_install: + - if [ ! -z "${GOLINT}" ]; then go get -u golang.org/x/lint/golint; fi + +script: + - go test --race ./... + +after_script: + - test -z "$(gofmt -s -l -w . | tee /dev/stderr)" + - if [ ! -z "${GOLINT}" ]; then echo running golint; golint --set_exit_status ./...; else echo skipping golint; fi + - go vet ./... 
+ +os: + - linux + - osx + - windows + +notifications: + email: false diff --git a/vendor/gopkg.in/fsnotify.v1/AUTHORS b/vendor/github.com/fsnotify/fsnotify/AUTHORS similarity index 100% rename from vendor/gopkg.in/fsnotify.v1/AUTHORS rename to vendor/github.com/fsnotify/fsnotify/AUTHORS diff --git a/vendor/gopkg.in/fsnotify.v1/CHANGELOG.md b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md similarity index 100% rename from vendor/gopkg.in/fsnotify.v1/CHANGELOG.md rename to vendor/github.com/fsnotify/fsnotify/CHANGELOG.md diff --git a/vendor/gopkg.in/fsnotify.v1/CONTRIBUTING.md b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md similarity index 100% rename from vendor/gopkg.in/fsnotify.v1/CONTRIBUTING.md rename to vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md diff --git a/vendor/gopkg.in/fsnotify.v1/LICENSE b/vendor/github.com/fsnotify/fsnotify/LICENSE similarity index 95% rename from vendor/gopkg.in/fsnotify.v1/LICENSE rename to vendor/github.com/fsnotify/fsnotify/LICENSE index f21e54080..e180c8fb0 100644 --- a/vendor/gopkg.in/fsnotify.v1/LICENSE +++ b/vendor/github.com/fsnotify/fsnotify/LICENSE @@ -1,5 +1,5 @@ Copyright (c) 2012 The Go Authors. All rights reserved. -Copyright (c) 2012 fsnotify Authors. All rights reserved. +Copyright (c) 2012-2019 fsnotify Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are diff --git a/vendor/gopkg.in/fsnotify.v1/README.md b/vendor/github.com/fsnotify/fsnotify/README.md similarity index 53% rename from vendor/gopkg.in/fsnotify.v1/README.md rename to vendor/github.com/fsnotify/fsnotify/README.md index 399320741..b2629e522 100644 --- a/vendor/gopkg.in/fsnotify.v1/README.md +++ b/vendor/github.com/fsnotify/fsnotify/README.md @@ -10,16 +10,16 @@ go get -u golang.org/x/sys/... Cross platform: Windows, Linux, BSD and macOS. 
-|Adapter |OS |Status | -|----------|----------|----------| -|inotify |Linux 2.6.27 or later, Android\*|Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify)| -|kqueue |BSD, macOS, iOS\*|Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify)| -|ReadDirectoryChangesW|Windows|Supported [![Build status](https://ci.appveyor.com/api/projects/status/ivwjubaih4r0udeh/branch/master?svg=true)](https://ci.appveyor.com/project/NathanYoungman/fsnotify/branch/master)| -|FSEvents |macOS |[Planned](https://github.com/fsnotify/fsnotify/issues/11)| -|FEN |Solaris 11 |[In Progress](https://github.com/fsnotify/fsnotify/issues/12)| -|fanotify |Linux 2.6.37+ | | -|USN Journals |Windows |[Maybe](https://github.com/fsnotify/fsnotify/issues/53)| -|Polling |*All* |[Maybe](https://github.com/fsnotify/fsnotify/issues/9)| +| Adapter | OS | Status | +| --------------------- | -------------------------------- | ------------------------------------------------------------------------------------------------------------------------------- | +| inotify | Linux 2.6.27 or later, Android\* | Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify) | +| kqueue | BSD, macOS, iOS\* | Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify) | +| ReadDirectoryChangesW | Windows | Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify) | +| FSEvents | macOS | [Planned](https://github.com/fsnotify/fsnotify/issues/11) | +| FEN | Solaris 11 | [In Progress](https://github.com/fsnotify/fsnotify/issues/12) | +| fanotify | Linux 2.6.37+ | [Planned](https://github.com/fsnotify/fsnotify/issues/114) | +| USN Journals | Windows | [Maybe](https://github.com/fsnotify/fsnotify/issues/53) | +| Polling | *All* | [Maybe](https://github.com/fsnotify/fsnotify/issues/9) | \* Android and iOS are untested. @@ -33,6 +33,53 @@ All [releases](https://github.com/fsnotify/fsnotify/releases) are tagged based o Go 1.6 supports dependencies located in the `vendor/` folder. Unless you are creating a library, it is recommended that you copy fsnotify into `vendor/github.com/fsnotify/fsnotify` within your project, and likewise for `golang.org/x/sys`. +## Usage + +```go +package main + +import ( + "log" + + "github.com/fsnotify/fsnotify" +) + +func main() { + watcher, err := fsnotify.NewWatcher() + if err != nil { + log.Fatal(err) + } + defer watcher.Close() + + done := make(chan bool) + go func() { + for { + select { + case event, ok := <-watcher.Events: + if !ok { + return + } + log.Println("event:", event) + if event.Op&fsnotify.Write == fsnotify.Write { + log.Println("modified file:", event.Name) + } + case err, ok := <-watcher.Errors: + if !ok { + return + } + log.Println("error:", err) + } + } + }() + + err = watcher.Add("/tmp/foo") + if err != nil { + log.Fatal(err) + } + <-done +} +``` + ## Contributing Please refer to [CONTRIBUTING][] before opening an issue or pull request. @@ -65,6 +112,10 @@ There are OS-specific limits as to how many watches can be created: * Linux: /proc/sys/fs/inotify/max_user_watches contains the limit, reaching this limit results in a "no space left on device" error. 
* BSD / OSX: sysctl variables "kern.maxfiles" and "kern.maxfilesperproc", reaching these limits results in a "too many open files" error. +**Why don't notifications work with NFS filesystems or filesystem in userspace (FUSE)?** + +fsnotify requires support from underlying OS to work. The current NFS protocol does not provide network level support for file notifications. + [#62]: https://github.com/howeyc/fsnotify/issues/62 [#18]: https://github.com/fsnotify/fsnotify/issues/18 [#11]: https://github.com/fsnotify/fsnotify/issues/11 diff --git a/vendor/gopkg.in/fsnotify.v1/fen.go b/vendor/github.com/fsnotify/fsnotify/fen.go similarity index 100% rename from vendor/gopkg.in/fsnotify.v1/fen.go rename to vendor/github.com/fsnotify/fsnotify/fen.go diff --git a/vendor/gopkg.in/fsnotify.v1/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go similarity index 95% rename from vendor/gopkg.in/fsnotify.v1/fsnotify.go rename to vendor/github.com/fsnotify/fsnotify/fsnotify.go index 190bf0de5..89cab046d 100644 --- a/vendor/gopkg.in/fsnotify.v1/fsnotify.go +++ b/vendor/github.com/fsnotify/fsnotify/fsnotify.go @@ -63,4 +63,6 @@ func (e Event) String() string { } // Common errors that can be reported by a watcher -var ErrEventOverflow = errors.New("fsnotify queue overflow") +var ( + ErrEventOverflow = errors.New("fsnotify queue overflow") +) diff --git a/vendor/github.com/fsnotify/fsnotify/go.mod b/vendor/github.com/fsnotify/fsnotify/go.mod new file mode 100644 index 000000000..ff11e13f2 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/go.mod @@ -0,0 +1,5 @@ +module github.com/fsnotify/fsnotify + +go 1.13 + +require golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9 diff --git a/vendor/github.com/fsnotify/fsnotify/go.sum b/vendor/github.com/fsnotify/fsnotify/go.sum new file mode 100644 index 000000000..f60af9855 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/go.sum @@ -0,0 +1,2 @@ +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9 h1:L2auWcuQIvxz9xSEqzESnV/QN/gNRXNApHi3fYwl2w0= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/gopkg.in/fsnotify.v1/inotify.go b/vendor/github.com/fsnotify/fsnotify/inotify.go similarity index 100% rename from vendor/gopkg.in/fsnotify.v1/inotify.go rename to vendor/github.com/fsnotify/fsnotify/inotify.go diff --git a/vendor/gopkg.in/fsnotify.v1/inotify_poller.go b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go similarity index 97% rename from vendor/gopkg.in/fsnotify.v1/inotify_poller.go rename to vendor/github.com/fsnotify/fsnotify/inotify_poller.go index cc7db4b22..b33f2b4d4 100644 --- a/vendor/gopkg.in/fsnotify.v1/inotify_poller.go +++ b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go @@ -40,12 +40,12 @@ func newFdPoller(fd int) (*fdPoller, error) { poller.fd = fd // Create epoll fd - poller.epfd, errno = unix.EpollCreate1(0) + poller.epfd, errno = unix.EpollCreate1(unix.EPOLL_CLOEXEC) if poller.epfd == -1 { return nil, errno } // Create pipe; pipe[0] is the read end, pipe[1] the write end. 
- errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK) + errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK|unix.O_CLOEXEC) if errno != nil { return nil, errno } diff --git a/vendor/gopkg.in/fsnotify.v1/kqueue.go b/vendor/github.com/fsnotify/fsnotify/kqueue.go similarity index 100% rename from vendor/gopkg.in/fsnotify.v1/kqueue.go rename to vendor/github.com/fsnotify/fsnotify/kqueue.go diff --git a/vendor/gopkg.in/fsnotify.v1/open_mode_bsd.go b/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go similarity index 79% rename from vendor/gopkg.in/fsnotify.v1/open_mode_bsd.go rename to vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go index 7d8de1451..2306c4620 100644 --- a/vendor/gopkg.in/fsnotify.v1/open_mode_bsd.go +++ b/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go @@ -8,4 +8,4 @@ package fsnotify import "golang.org/x/sys/unix" -const openMode = unix.O_NONBLOCK | unix.O_RDONLY +const openMode = unix.O_NONBLOCK | unix.O_RDONLY | unix.O_CLOEXEC diff --git a/vendor/gopkg.in/fsnotify.v1/open_mode_darwin.go b/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go similarity index 84% rename from vendor/gopkg.in/fsnotify.v1/open_mode_darwin.go rename to vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go index 9139e1716..870c4d6d1 100644 --- a/vendor/gopkg.in/fsnotify.v1/open_mode_darwin.go +++ b/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go @@ -9,4 +9,4 @@ package fsnotify import "golang.org/x/sys/unix" // note: this constant is not defined on BSD -const openMode = unix.O_EVTONLY +const openMode = unix.O_EVTONLY | unix.O_CLOEXEC diff --git a/vendor/gopkg.in/fsnotify.v1/windows.go b/vendor/github.com/fsnotify/fsnotify/windows.go similarity index 100% rename from vendor/gopkg.in/fsnotify.v1/windows.go rename to vendor/github.com/fsnotify/fsnotify/windows.go diff --git a/vendor/github.com/gardener/external-dns-management/LICENSE.md b/vendor/github.com/gardener/external-dns-management/LICENSE.md new file mode 100644 index 000000000..f178789f4 --- /dev/null +++ b/vendor/github.com/gardener/external-dns-management/LICENSE.md @@ -0,0 +1,320 @@ +``` + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +``` + +## APIs + +This project may include APIs to SAP or third party products or services. The use of these APIs, products and services may be subject to additional agreements. In no event shall the application of the Apache Software License, v.2 to this project grant any rights in or to these APIs, products or services that would alter, expand, be inconsistent with, or supersede any terms of these additional agreements. API means application programming interfaces, as well as their respective specifications and implementing code that allows other software products to communicate with or call on SAP or third party products or services (for example, SAP Enterprise Services, BAPIs, Idocs, RFCs and ABAP calls or other user exits) and may be made available through SAP or third party products, SDKs, documentation or other media. + +## Subcomponents + +This project includes the following subcomponents that are subject to separate license terms. 
+Your use of these subcomponents is subject to the separate license terms applicable to +each subcomponent. + +Gardener Controller Manager Library +Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. +Apache 2 license (https://github.com/gardener/controller-manager-library/blob/master/LICENSE.md) + +APIMachinery +https://git.k8s.io/apimachinery +Copyright 2017 The Kubernetes Authors +Apache 2 license (https://git.k8s.io/apimachinery/LICENSE) + +Client-Go +https://git.k8s.io/client-go +Copyright 2017 The Kubernetes Authors +Apache 2 license (https://git.k8s.io/client-go/LICENSE) + +Code-Generator +https://git.k8s.io/code-generator +Copyright 2017 The Kubernetes Authors +Apache 2 license (https://git.k8s.io/code-generator/LICENSE) + +Cobra. +https://github.com/spf13/cobra +Copyright © 2013 Steve Francia +Apache 2 license (https://git.k8s.io/code-generator/LICENSE) + +Logrus. +https://github.com/sirupsen/logrus +Copyright (c) 2014 Simon Eskildsen +MIT license (https://github.com/sirupsen/logrus/blob/master/LICENSE) + +Google DNS SDK +https://github.com/googleapis/google-api-go-client +Copyright (c) 2011 Google Inc. All rights reserved +BSD 3-Clause "New" or "Revised" License (https://github.com/googleapis/google-api-go-client/blob/master/LICENSE) + +AWS SDK for Go +Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. +Copyright 2014-2015 Stripe, Inc. +Apache 2 license (https://github.com/aws/aws-sdk-go/blob/master/LICENSE.txt) + +Some code fragments have been extracted from kubernetes-incubator/external-dns +Copyright 2017 The Kubernetes Authors. +Apache 2 license (https://github.com/kubernetes-incubator/external-dns/blob/master/LICENSE) + +------ + +## MIT License + + SPDX short identifier: MIT + +``` + The MIT License (MIT) + + Copyright (c) + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + THE SOFTWARE. +``` + +## The 3-Clause BSD License + + SPDX short identifier: BSD-3-Clause + +``` + Copyright + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. 
Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from this + software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. +``` diff --git a/vendor/github.com/gardener/external-dns-management/NOTICE.md b/vendor/github.com/gardener/external-dns-management/NOTICE.md new file mode 100644 index 000000000..def0add2c --- /dev/null +++ b/vendor/github.com/gardener/external-dns-management/NOTICE.md @@ -0,0 +1,2 @@ +## External DNS Management +Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. diff --git a/vendor/github.com/gardener/external-dns-management/pkg/apis/dns/register.go b/vendor/github.com/gardener/external-dns-management/pkg/apis/dns/register.go new file mode 100644 index 000000000..a43fe39c5 --- /dev/null +++ b/vendor/github.com/gardener/external-dns-management/pkg/apis/dns/register.go @@ -0,0 +1,25 @@ +/* + * Copyright 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * + */ + +//go:generate bash ../../../vendor/github.com/gardener/controller-manager-library/hack/generate-crds +//go:generate bash ../../../hack/generate-code +// +kubebuilder:skip + +package dns + +const ( + GroupName = "dns.gardener.cloud" +) diff --git a/vendor/github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1/dnsannotation.go b/vendor/github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1/dnsannotation.go new file mode 100644 index 000000000..7d17d6688 --- /dev/null +++ b/vendor/github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1/dnsannotation.go @@ -0,0 +1,81 @@ +/* + * Copyright 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * + */ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type DNSAnnotationList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty"` + Items []DNSAnnotation `json:"items"` +} + +// +kubebuilder:storageversion +// +kubebuilder:object:root=true +// +kubebuilder:resource:scope=Namespaced,path=dnsannotations,shortName=dnsa,singular=dnsannotation +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name=RefGroup,JSONPath=".spec.resourceRef.apiVersion",type=string +// +kubebuilder:printcolumn:name=RefKind,JSONPath=".spec.resourceRef.kind",type=string +// +kubebuilder:printcolumn:name=RefName,JSONPath=".spec.resourceRef.name",type=string +// +kubebuilder:printcolumn:name=RefNamespace,JSONPath=".spec.resourceRef.namespace",type=string +// +kubebuilder:printcolumn:name=Active,JSONPath=".status.active",type=boolean +// +kubebuilder:printcolumn:name=Age,JSONPath=".metadata.creationTimestamp",type=date +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type DNSAnnotation struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec DNSAnnotationSpec `json:"spec"` + // +optional + Status DNSAnnotationStatus `json:"status,omitempty"` +} + +type DNSAnnotationSpec struct { + ResourceRef ResourceReference `json:"resourceRef"` + Annotations map[string]string `json:"annotations"` +} + +type ResourceReference struct { + // API Version of the annotated object + APIVersion string `json:"apiVersion"` + // Kind of the annotated object + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + Kind string `json:"kind"` + // Name of the annotated object + // +optional + Name string `json:"name,omitempty"` + // Namspace of the annotated object + // Defaulted by the namespace of the containing resource. + // +optional + Namespace string `json:"namespace,omitempty"` +} + +type DNSAnnotationStatus struct { + // Indicates that annotation is observed by a DNS sorce controller + // +optional + Active bool `json:"active,omitempty"` + // In case of a configuration problem this field describes the reason + // +optional + Message string `json:"message,omitempty"` +} diff --git a/vendor/github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1/dnsentry.go b/vendor/github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1/dnsentry.go new file mode 100644 index 000000000..7aaf46670 --- /dev/null +++ b/vendor/github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1/dnsentry.go @@ -0,0 +1,108 @@ +/* + * Copyright 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * + */ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type DNSEntryList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty"` + Items []DNSEntry `json:"items"` +} + +// +kubebuilder:storageversion +// +kubebuilder:object:root=true +// +kubebuilder:resource:scope=Namespaced,path=dnsentries,shortName=dnse,singular=dnsentry +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name=DNS,description="FQDN of DNS Entry",JSONPath=".spec.dnsName",type=string +// +kubebuilder:printcolumn:name=OWNERID,JSONPath=".spec.ownerId",type=string +// +kubebuilder:printcolumn:name=TYPE,JSONPath=".status.providerType",type=string +// +kubebuilder:printcolumn:name=PROVIDER,JSONPath=".status.provider",type=string +// +kubebuilder:printcolumn:name=STATUS,JSONPath=".status.state",type=string +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type DNSEntry struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec DNSEntrySpec `json:"spec"` + // +optional + Status DNSEntryStatus `json:"status,omitempty"` +} + +type DNSEntrySpec struct { + // full qualified domain name + DNSName string `json:"dnsName"` + // reference to base entry used to inherit attributes from + // +optional + Reference *EntryReference `json:"reference,omitempty"` + // owner id used to tag entries in external DNS system + // +optional + OwnerId *string `json:"ownerId,omitempty"` + // time to live for records in external DNS system + // +optional + TTL *int64 `json:"ttl,omitempty"` + // lookup interval for CNAMEs that must be resolved to IP addresses + // +optional + CNameLookupInterval *int64 `json:"cnameLookupInterval,omitempty"` + // text records, either text or targets must be specified + // +optional + Text []string `json:"text,omitempty"` + // target records (CNAME or A records), either text or targets must be specified + // +optional + Targets []string `json:"targets,omitempty"` +} + +type DNSEntryStatus struct { + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + // entry state + // +optional + State string `json:"state"` + // message describing the reason for the state + // +optional + Message *string `json:"message,omitempty"` + // provider type used for the entry + // +optional + ProviderType *string `json:"providerType,omitempty"` + // assigned provider + // +optional + Provider *string `json:"provider,omitempty"` + // zone used for the entry + // +optional + Zone *string `json:"zone,omitempty"` + // time to live used for the entry + // +optional + TTL *int64 `json:"ttl,omitempty"` + // effective targets generated for the entry + // +optional + Targets []string `json:"targets,omitempty"` +} + +type EntryReference struct { + // name of the referenced DNSEntry object + Name string `json:"name"` + // namespace of the referenced DNSEntry object + // +optional + Namespace string 
`json:"namespace,omitempty"` +} diff --git a/vendor/github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1/dnsowner.go b/vendor/github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1/dnsowner.go new file mode 100644 index 000000000..0c23b3302 --- /dev/null +++ b/vendor/github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1/dnsowner.go @@ -0,0 +1,73 @@ +/* + * Copyright 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * + */ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type DNSOwnerList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty"` + Items []DNSOwner `json:"items"` +} + +// +kubebuilder:storageversion +// +kubebuilder:object:root=true +// +kubebuilder:resource:scope=Cluster,path=dnsowners,shortName=dnso,singular=dnsowner +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name=OwnerId,JSONPath=".spec.ownerId",type=string +// +kubebuilder:printcolumn:name=Active,JSONPath=".spec.active",type=boolean +// +kubebuilder:printcolumn:name=Usages,JSONPath=".status.amount",type=string +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type DNSOwner struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec DNSOwnerSpec `json:"spec"` + // +optional + Status DNSOwnerStatus `json:"status,omitempty"` +} + +type DNSOwnerSpec struct { + // owner id used to tag entries in external DNS system + OwnerId string `json:"ownerId"` + // state of the ownerid for the DNS controller observing entry using this owner id + // (default:true) + // +optional + Active *bool `json:"active,omitempty"` +} + +type DNSOwnerStatus struct { + // Entry statistic for this owner id + // +optional + Entries DNSOwnerStatusEntries `json:"entries,omitempty"` +} + +type DNSOwnerStatusEntries struct { + // number of entries using this owner id + // +optional + Amount int `json:"amount"` + // number of entries per provider type + // +optional + ByType map[string]int `json:"types,omitempty"` +} diff --git a/vendor/github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1/dnsprovider.go b/vendor/github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1/dnsprovider.go new file mode 100644 index 000000000..793890cb1 --- /dev/null +++ b/vendor/github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1/dnsprovider.go @@ -0,0 +1,105 @@ +/* + * Copyright 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 
2 except as noted otherwise in the LICENSE file + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * + */ + +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type DNSProviderList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty"` + Items []DNSProvider `json:"items"` +} + +// +kubebuilder:storageversion +// +kubebuilder:object:root=true +// +kubebuilder:resource:scope=Namespaced,path=dnsproviders,shortName=dnspr,singular=dnsprovider +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name=TYPE,JSONPath=".spec.type",type=string +// +kubebuilder:printcolumn:name=STATUS,JSONPath=".status.state",type=string +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type DNSProvider struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec DNSProviderSpec `json:"spec"` + // +optional + Status DNSProviderStatus `json:"status,omitempty"` +} + +type DNSProviderSpec struct { + // type of the provider (selecting the responsible type of DNS controller) + Type string `json:"type,omitempty"` + // optional additional provider specific configuration values + // +kubebuilder:validation:XPreserveUnknownFields + // +kubebuilder:pruning:PreserveUnknownFields + // +optional + ProviderConfig *runtime.RawExtension `json:"providerConfig,omitempty"` + // access credential for the external DNS system of the given type + SecretRef *corev1.SecretReference `json:"secretRef,omitempty"` + // desired selection of usable domains + // (by default all zones and domains in those zones will be served) + // +optional + Domains *DNSSelection `json:"domains,omitempty"` + // desired selection of usable domains + // the domain selection is used for served zones, only + // (by default all zones will be served) + // +optional + Zones *DNSSelection `json:"zones,omitempty"` +} + +type DNSSelection struct { + // values that should be observed (domains or zones) + // + optional + Include []string `json:"include,omitempty"` + // values that should be ignored (domains or zones) + // + optional + Exclude []string `json:"exclude,omitempty"` +} + +type DNSProviderStatus struct { + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + // state of the provider + // +optional + State string `json:"state"` + // message describing the reason for the actual state of the provider + Message *string `json:"message,omitempty"` + // actually served domain selection + // +optional + Domains DNSSelectionStatus `json:"domains"` + // actually served zones + // +optional + Zones DNSSelectionStatus `json:"zones"` +} + +type DNSSelectionStatus struct { + // included values (domains or zones) + // + optional + Included []string `json:"included,omitempty"` + // 
Excluded values (domains or zones) + // + optional + Excluded []string `json:"excluded,omitempty"` +} diff --git a/vendor/github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1/doc.go b/vendor/github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1/doc.go new file mode 100644 index 000000000..35b9da67a --- /dev/null +++ b/vendor/github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1/doc.go @@ -0,0 +1,21 @@ +/* + * Copyright 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * + */ + +// +k8s:deepcopy-gen=package,register + +// Package v1alpha1 is the v1alpha1 version of the API. +// +groupName=dns.gardener.cloud +package v1alpha1 diff --git a/vendor/github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1/register.go b/vendor/github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1/register.go new file mode 100644 index 000000000..1e9a627ec --- /dev/null +++ b/vendor/github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1/register.go @@ -0,0 +1,75 @@ +/* + * Copyright 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * + */ + +package v1alpha1 + +import ( + "github.com/gardener/external-dns-management/pkg/apis/dns" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const ( + Version = "v1alpha1" + GroupName = dns.GroupName + + DNSOwnerKind = "DNSOwner" + DNSOwnerPlural = "dnsowners" + + DNSProviderKind = "DNSProvider" + DNSProviderPlural = "dnsproviders" + + DNSEntryKind = "DNSEntry" + DNSEntryPlural = "dnsentries" + + DNSAnnotationKind = "DNSAnnotation" + DNSAnnotationPlural = "dnsannotations" +) + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: dns.GroupName, Version: Version} + +// Kind takes an unqualified kind and returns back a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resources and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &DNSOwner{}, + &DNSOwnerList{}, + &DNSProvider{}, + &DNSProviderList{}, + &DNSEntry{}, + &DNSEntryList{}, + &DNSAnnotation{}, + &DNSAnnotationList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1/state.go b/vendor/github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1/state.go new file mode 100644 index 000000000..f23b73156 --- /dev/null +++ b/vendor/github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1/state.go @@ -0,0 +1,24 @@ +/* + * Copyright 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * + */ + +package v1alpha1 + +const STATE_PENDING = "Pending" +const STATE_ERROR = "Error" +const STATE_INVALID = "Invalid" +const STATE_STALE = "Stale" +const STATE_READY = "Ready" +const STATE_DELETING = "Deleting" diff --git a/vendor/github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000..aa2c0ee4b --- /dev/null +++ b/vendor/github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,606 @@ +// +build !ignore_autogenerated + +/* +Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 
2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSAnnotation) DeepCopyInto(out *DNSAnnotation) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSAnnotation. +func (in *DNSAnnotation) DeepCopy() *DNSAnnotation { + if in == nil { + return nil + } + out := new(DNSAnnotation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DNSAnnotation) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSAnnotationList) DeepCopyInto(out *DNSAnnotationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DNSAnnotation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSAnnotationList. +func (in *DNSAnnotationList) DeepCopy() *DNSAnnotationList { + if in == nil { + return nil + } + out := new(DNSAnnotationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DNSAnnotationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSAnnotationSpec) DeepCopyInto(out *DNSAnnotationSpec) { + *out = *in + out.ResourceRef = in.ResourceRef + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSAnnotationSpec. +func (in *DNSAnnotationSpec) DeepCopy() *DNSAnnotationSpec { + if in == nil { + return nil + } + out := new(DNSAnnotationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DNSAnnotationStatus) DeepCopyInto(out *DNSAnnotationStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSAnnotationStatus. +func (in *DNSAnnotationStatus) DeepCopy() *DNSAnnotationStatus { + if in == nil { + return nil + } + out := new(DNSAnnotationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSEntry) DeepCopyInto(out *DNSEntry) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSEntry. +func (in *DNSEntry) DeepCopy() *DNSEntry { + if in == nil { + return nil + } + out := new(DNSEntry) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DNSEntry) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSEntryList) DeepCopyInto(out *DNSEntryList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DNSEntry, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSEntryList. +func (in *DNSEntryList) DeepCopy() *DNSEntryList { + if in == nil { + return nil + } + out := new(DNSEntryList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DNSEntryList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSEntrySpec) DeepCopyInto(out *DNSEntrySpec) { + *out = *in + if in.Reference != nil { + in, out := &in.Reference, &out.Reference + *out = new(EntryReference) + **out = **in + } + if in.OwnerId != nil { + in, out := &in.OwnerId, &out.OwnerId + *out = new(string) + **out = **in + } + if in.TTL != nil { + in, out := &in.TTL, &out.TTL + *out = new(int64) + **out = **in + } + if in.CNameLookupInterval != nil { + in, out := &in.CNameLookupInterval, &out.CNameLookupInterval + *out = new(int64) + **out = **in + } + if in.Text != nil { + in, out := &in.Text, &out.Text + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Targets != nil { + in, out := &in.Targets, &out.Targets + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSEntrySpec. +func (in *DNSEntrySpec) DeepCopy() *DNSEntrySpec { + if in == nil { + return nil + } + out := new(DNSEntrySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DNSEntryStatus) DeepCopyInto(out *DNSEntryStatus) { + *out = *in + if in.Message != nil { + in, out := &in.Message, &out.Message + *out = new(string) + **out = **in + } + if in.ProviderType != nil { + in, out := &in.ProviderType, &out.ProviderType + *out = new(string) + **out = **in + } + if in.Provider != nil { + in, out := &in.Provider, &out.Provider + *out = new(string) + **out = **in + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = new(string) + **out = **in + } + if in.TTL != nil { + in, out := &in.TTL, &out.TTL + *out = new(int64) + **out = **in + } + if in.Targets != nil { + in, out := &in.Targets, &out.Targets + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSEntryStatus. +func (in *DNSEntryStatus) DeepCopy() *DNSEntryStatus { + if in == nil { + return nil + } + out := new(DNSEntryStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSOwner) DeepCopyInto(out *DNSOwner) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSOwner. +func (in *DNSOwner) DeepCopy() *DNSOwner { + if in == nil { + return nil + } + out := new(DNSOwner) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DNSOwner) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSOwnerList) DeepCopyInto(out *DNSOwnerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DNSOwner, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSOwnerList. +func (in *DNSOwnerList) DeepCopy() *DNSOwnerList { + if in == nil { + return nil + } + out := new(DNSOwnerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DNSOwnerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSOwnerSpec) DeepCopyInto(out *DNSOwnerSpec) { + *out = *in + if in.Active != nil { + in, out := &in.Active, &out.Active + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSOwnerSpec. +func (in *DNSOwnerSpec) DeepCopy() *DNSOwnerSpec { + if in == nil { + return nil + } + out := new(DNSOwnerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DNSOwnerStatus) DeepCopyInto(out *DNSOwnerStatus) { + *out = *in + in.Entries.DeepCopyInto(&out.Entries) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSOwnerStatus. +func (in *DNSOwnerStatus) DeepCopy() *DNSOwnerStatus { + if in == nil { + return nil + } + out := new(DNSOwnerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSOwnerStatusEntries) DeepCopyInto(out *DNSOwnerStatusEntries) { + *out = *in + if in.ByType != nil { + in, out := &in.ByType, &out.ByType + *out = make(map[string]int, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSOwnerStatusEntries. +func (in *DNSOwnerStatusEntries) DeepCopy() *DNSOwnerStatusEntries { + if in == nil { + return nil + } + out := new(DNSOwnerStatusEntries) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSProvider) DeepCopyInto(out *DNSProvider) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSProvider. +func (in *DNSProvider) DeepCopy() *DNSProvider { + if in == nil { + return nil + } + out := new(DNSProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DNSProvider) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSProviderList) DeepCopyInto(out *DNSProviderList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DNSProvider, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSProviderList. +func (in *DNSProviderList) DeepCopy() *DNSProviderList { + if in == nil { + return nil + } + out := new(DNSProviderList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DNSProviderList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DNSProviderSpec) DeepCopyInto(out *DNSProviderSpec) { + *out = *in + if in.ProviderConfig != nil { + in, out := &in.ProviderConfig, &out.ProviderConfig + *out = new(runtime.RawExtension) + (*in).DeepCopyInto(*out) + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(v1.SecretReference) + **out = **in + } + if in.Domains != nil { + in, out := &in.Domains, &out.Domains + *out = new(DNSSelection) + (*in).DeepCopyInto(*out) + } + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = new(DNSSelection) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSProviderSpec. +func (in *DNSProviderSpec) DeepCopy() *DNSProviderSpec { + if in == nil { + return nil + } + out := new(DNSProviderSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSProviderStatus) DeepCopyInto(out *DNSProviderStatus) { + *out = *in + if in.Message != nil { + in, out := &in.Message, &out.Message + *out = new(string) + **out = **in + } + in.Domains.DeepCopyInto(&out.Domains) + in.Zones.DeepCopyInto(&out.Zones) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSProviderStatus. +func (in *DNSProviderStatus) DeepCopy() *DNSProviderStatus { + if in == nil { + return nil + } + out := new(DNSProviderStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSSelection) DeepCopyInto(out *DNSSelection) { + *out = *in + if in.Include != nil { + in, out := &in.Include, &out.Include + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Exclude != nil { + in, out := &in.Exclude, &out.Exclude + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSSelection. +func (in *DNSSelection) DeepCopy() *DNSSelection { + if in == nil { + return nil + } + out := new(DNSSelection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSSelectionStatus) DeepCopyInto(out *DNSSelectionStatus) { + *out = *in + if in.Included != nil { + in, out := &in.Included, &out.Included + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Excluded != nil { + in, out := &in.Excluded, &out.Excluded + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSSelectionStatus. +func (in *DNSSelectionStatus) DeepCopy() *DNSSelectionStatus { + if in == nil { + return nil + } + out := new(DNSSelectionStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EntryReference) DeepCopyInto(out *EntryReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EntryReference. +func (in *EntryReference) DeepCopy() *EntryReference { + if in == nil { + return nil + } + out := new(EntryReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *ResourceReference) DeepCopyInto(out *ResourceReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceReference. +func (in *ResourceReference) DeepCopy() *ResourceReference { + if in == nil { + return nil + } + out := new(ResourceReference) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/gardener/gardener-resource-manager/LICENSE.md b/vendor/github.com/gardener/gardener-resource-manager/LICENSE.md new file mode 100644 index 000000000..065c1271e --- /dev/null +++ b/vendor/github.com/gardener/gardener-resource-manager/LICENSE.md @@ -0,0 +1,288 @@ +``` + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +``` + +## APIs + +This project may include APIs to SAP or third party products or services. The use of these APIs, products and services may be subject to additional agreements. In no event shall the application of the Apache Software License, v.2 to this project grant any rights in or to these APIs, products or services that would alter, expand, be inconsistent with, or supersede any terms of these additional agreements. API means application programming interfaces, as well as their respective specifications and implementing code that allows other software products to communicate with or call on SAP or third party products or services (for example, SAP Enterprise Services, BAPIs, Idocs, RFCs and ABAP calls or other user exits) and may be made available through SAP or third party products, SDKs, documentation or other media. + +## Subcomponents + +This project includes the following subcomponents that are subject to separate license terms. +Your use of these subcomponents is subject to the separate license terms applicable to +each subcomponent. + +Gardener. +https://github.com/gardener/gardener. +Copyright (c) 2019 SAP SE or an SAP affiliate company. +Apache 2 license (https://github.com/gardener/gardener/blob/master/LICENSE.md). + +controller-runtime. +https://sigs.k8s.io/controller-runtime. +Copyright 2019 The Kubernetes Authors. +Apache 2 license (https://sigs.k8s.io/controller-runtime/LICENSE). + +API. +https://git.k8s.io/api. +Copyright 2019 The Kubernetes Authors. +Apache 2 license (https://git.k8s.io/api/LICENSE). + +APIMachinery. +https://git.k8s.io/apimachinery. +Copyright 2019 The Kubernetes Authors. +Apache 2 license (https://git.k8s.io/apimachinery/LICENSE). + +Client-Go. +https://git.k8s.io/client-go. +Copyright 2017 The Kubernetes Authors. +Apache 2 license (https://git.k8s.io/client-go/LICENSE). + +YAML marshaling and unmarshalling support for Go. +gopkg.in/yaml.v2. +Copyright 2011-2016 Canonical Ltd. +Apache 2 license (https://github.com/go-yaml/yaml/blob/v2/LICENSE) + +Packr. +https://github.com/gobuffalo/packr +Copyright (c) 2016 Mark Bates. +MIT license (https://github.com/gobuffalo/packr/blob/master/LICENSE.txt) + +Cobra. +https://github.com/spf13/cobra +Copyright 2019 Steve Francia. +Apache 2 license (https://github.com/spf13/cobra/blob/master/LICENSE.txt) + +Ginkgo. +https://github.com/onsi/ginkgo. 
+Copyright (c) 2013-2014 Onsi Fakhouri. +MIT license (https://github.com/onsi/ginkgo/blob/master/LICENSE) + +Gomega. +github.com/onsi/gomega. +Copyright (c) 2013-2014 Onsi Fakhouri. +MIT license (https://github.com/onsi/gomega/blob/master/LICENSE) + +------ +## MIT License + + The MIT License (MIT) + + Copyright (c) + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + THE SOFTWARE. diff --git a/vendor/github.com/gardener/gardener-resource-manager/NOTICE.md b/vendor/github.com/gardener/gardener-resource-manager/NOTICE.md new file mode 100644 index 000000000..6e3aae0b3 --- /dev/null +++ b/vendor/github.com/gardener/gardener-resource-manager/NOTICE.md @@ -0,0 +1,15 @@ +## Gardener Resource Manager +Copyright (c) 2017-2019 SAP SE or an SAP affiliate company. All rights reserved. + +## Seed Source + +The source code of this component was seeded based on a copy of the following files from [github.com/kubernetes-sigs](github.com/kubernetes-sigs): + +controller-runtime. +https://sigs.k8s.io/controller-runtime. +Copyright 2018 The Kubernetes Authors. +Apache 2 license (https://sigs.k8s.io/controller-runtime/LICENSE). + +Version: 0.1.9. +Commit-ID: f6f0bc9611363b43664d08fb097ab13243ef621d +Commit-Message: Merge pull request #263 from DirectXMan12/release/v0.1.9 diff --git a/vendor/github.com/gardener/gardener-resource-manager/pkg/apis/resources/register.go b/vendor/github.com/gardener/gardener-resource-manager/pkg/apis/resources/register.go new file mode 100644 index 000000000..112a3f118 --- /dev/null +++ b/vendor/github.com/gardener/gardener-resource-manager/pkg/apis/resources/register.go @@ -0,0 +1,18 @@ +// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package resources + +// GroupName is the group name use in this package +const GroupName = "resources.gardener.cloud" diff --git a/vendor/github.com/gardener/gardener-resource-manager/pkg/apis/resources/v1alpha1/doc.go b/vendor/github.com/gardener/gardener-resource-manager/pkg/apis/resources/v1alpha1/doc.go new file mode 100644 index 000000000..19823fcfc --- /dev/null +++ b/vendor/github.com/gardener/gardener-resource-manager/pkg/apis/resources/v1alpha1/doc.go @@ -0,0 +1,24 @@ +// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +k8s:deepcopy-gen=package +// +k8s:conversion-gen=github.com/gardener/gardener-resource-manager/pkg/apis/resources +// +k8s:openapi-gen=true +// +k8s:defaulter-gen=TypeMeta + +//go:generate ../../../../hack/update-codegen.sh + +// Package v1alpha1 contains the configuration of the Gardener Resource Manager. +// +groupName=resources.gardener.cloud +package v1alpha1 // import "github.com/gardener/gardener-resource-manager/pkg/apis/resources/v1alpha1" diff --git a/vendor/github.com/gardener/gardener-resource-manager/pkg/apis/resources/v1alpha1/register.go b/vendor/github.com/gardener/gardener-resource-manager/pkg/apis/resources/v1alpha1/register.go new file mode 100644 index 000000000..ec3097f41 --- /dev/null +++ b/vendor/github.com/gardener/gardener-resource-manager/pkg/apis/resources/v1alpha1/register.go @@ -0,0 +1,51 @@ +// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package v1alpha1 + +import ( + resources "github.com/gardener/gardener-resource-manager/pkg/apis/resources" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: resources.GroupName, Version: "v1alpha1"} + +// Kind takes an unqualified kind and returns back a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &ManagedResource{}, + &ManagedResourceList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/github.com/gardener/gardener-resource-manager/pkg/apis/resources/v1alpha1/types.go b/vendor/github.com/gardener/gardener-resource-manager/pkg/apis/resources/v1alpha1/types.go new file mode 100644 index 000000000..ca431ee36 --- /dev/null +++ b/vendor/github.com/gardener/gardener-resource-manager/pkg/apis/resources/v1alpha1/types.go @@ -0,0 +1,170 @@ +// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + // Ignore is an annotation that dictates whether a resources should be ignored during + // reconciliation. + Ignore = "resources.gardener.cloud/ignore" + // DeleteOnInvalidUpdate is a constant for an annotation on a resource managed by a ManagedResource. If set to + // true then the controller will delete the object in case it faces an "Invalid" response during an update operation. + DeleteOnInvalidUpdate = "resources.gardener.cloud/delete-on-invalid-update" + // KeepObject is a constant for an annotation on a resource managed by a ManagedResource. If set to + // true then the controller will not delete the object in case it is removed from the ManagedResource or the + // ManagedResource itself is deleted. + KeepObject = "resources.gardener.cloud/keep-object" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ManagedResource describes a list of managed resources. +type ManagedResource struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata. + metav1.ObjectMeta `json:"metadata,omitempty"` + // Spec contains the specification of this managed resource. 
+ Spec ManagedResourceSpec `json:"spec,omitempty"` + // Status contains the status of this managed resource. + Status ManagedResourceStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ManagedResourceList is a list of ManagedResource resources. +type ManagedResourceList struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + + // Items is the list of ManagedResource. + Items []ManagedResource `json:"items"` +} + +type ManagedResourceSpec struct { + // Class holds the resource class used to control the responsibility for multiple resource manager instances + // +optional + Class *string `json:"class,omitempty"` + // SecretRefs is a list of secret references. + SecretRefs []corev1.LocalObjectReference `json:"secretRefs"` + // InjectLabels injects the provided labels into every resource that is part of the referenced secrets. + // +optional + InjectLabels map[string]string `json:"injectLabels,omitempty"` + // ForceOverwriteLabels specifies that all existing labels should be overwritten. Defaults to false. + // +optional + ForceOverwriteLabels *bool `json:"forceOverwriteLabels,omitempty"` + // ForceOverwriteAnnotations specifies that all existing annotations should be overwritten. Defaults to false. + // +optional + ForceOverwriteAnnotations *bool `json:"forceOverwriteAnnotations,omitempty"` + // KeepObjects specifies whether the objects should be kept although the managed resource has already been deleted. + // Defaults to false. + // +optional + KeepObjects *bool `json:"keepObjects,omitempty"` + // Equivalences specifies possible group/kind equivalences for objects. + // +optional + Equivalences [][]metav1.GroupKind `json:"equivalences,omitempty"` + // DeletePersistentVolumeClaims specifies if PersistentVolumeClaims created by StatefulSets, which are managed by this + // resource, should also be deleted when the corresponding StatefulSet is deleted (defaults to false). + // +optional + DeletePersistentVolumeClaims *bool `json:"deletePersistentVolumeClaims,omitempty"` +} + +// ManagedResourceStatus is the status of a managed resource. +type ManagedResourceStatus struct { + Conditions []ManagedResourceCondition `json:"conditions,omitempty"` + // ObservedGeneration is the most recent generation observed for this resource. + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + // Resources is a list of objects that have been created. + // +optional + Resources []ObjectReference `json:"resources,omitempty"` +} + +type ObjectReference struct { + corev1.ObjectReference `json:",inline"` + // Labels is a map of labels that were used during last update of the resource. + Labels map[string]string `json:"labels,omitempty"` + // Annotations is a map of annotations that were used during last update of the resource. + Annotations map[string]string `json:"annotations,omitempty"` +} + +// ConditionType is the type of a condition. +type ConditionType string + +const ( + // ResourcesApplied is a condition type that indicates whether all resources are applied to the target cluster. + ResourcesApplied ConditionType = "ResourcesApplied" + // ResourcesHealthy is a condition type that indicates whether all resources are present and healthy. + ResourcesHealthy ConditionType = "ResourcesHealthy" +) + +// ConditionStatus is the status of a condition. +type ConditionStatus string + +// These are valid condition statuses. +const ( + // ConditionTrue means a resource is in the condition. 
+ ConditionTrue ConditionStatus = "True" + // ConditionFalse means a resource is not in the condition. + ConditionFalse ConditionStatus = "False" + // ConditionUnknown means that the controller can't decide if a resource is in the condition or not + ConditionUnknown ConditionStatus = "Unknown" + // ConditionProgressing means that the controller is currently acting on the resource and the condition is therefore progressing. + ConditionProgressing ConditionStatus = "Progressing" +) + +// These are well-known reasons for ManagedResourceConditions. +const ( + // ConditionApplySucceeded indicates that the `ResourcesApplied` condition is `True`, + // because all resources have been applied successfully. + ConditionApplySucceeded = "ApplySucceeded" + // ConditionApplyFailed indicates that the `ResourcesApplied` condition is `False`, + // because applying the resources failed. + ConditionApplyFailed = "ApplyFailed" + // ConditionDecodingFailed indicates that the `ResourcesApplied` condition is `False`, + // because decoding the resources of the ManagedResource failed. + ConditionDecodingFailed = "DecodingFailed" + // ConditionApplyProgressing indicates that the `ResourcesApplied` condition is `Progressing`, + // because the resources are currently being reconciled. + ConditionApplyProgressing = "ApplyProgressing" + // ConditionDeletionFailed indicates that the `ResourcesApplied` condition is `False`, + // because deleting the resources failed. + ConditionDeletionFailed = "DeletionFailed" + // ConditionDeletionPending indicates that the `ResourcesApplied` condition is `Progressing`, + // because the deletion of some resources is still pending. + ConditionDeletionPending = "DeletionPending" + // ConditionHealthChecksPending indicates that the `ResourcesHealthy` condition is `Unknown`, + // because the health checks have not been completely executed yet for the current set of resources. + ConditionHealthChecksPending = "HealthChecksPending" +) + +// ManagedResourceCondition describes the state of a deployment at a certain period. +type ManagedResourceCondition struct { + // Type of the ManagedResource condition. + Type ConditionType `json:"type"` + // Status of the ManagedResource condition. + Status ConditionStatus `json:"status"` + // Last time the condition was updated. + LastUpdateTime metav1.Time `json:"lastUpdateTime"` + // Last time the condition transitioned from one status to another. + LastTransitionTime metav1.Time `json:"lastTransitionTime"` + // The reason for the condition's last transition. + Reason string `json:"reason"` + // A human readable message indicating details about the transition. + Message string `json:"message"` +} diff --git a/vendor/github.com/gardener/gardener-resource-manager/pkg/apis/resources/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/gardener/gardener-resource-manager/pkg/apis/resources/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000..0ed4b79e5 --- /dev/null +++ b/vendor/github.com/gardener/gardener-resource-manager/pkg/apis/resources/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,231 @@ +// +build !ignore_autogenerated + +/* +Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedResource) DeepCopyInto(out *ManagedResource) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedResource. +func (in *ManagedResource) DeepCopy() *ManagedResource { + if in == nil { + return nil + } + out := new(ManagedResource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ManagedResource) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedResourceCondition) DeepCopyInto(out *ManagedResourceCondition) { + *out = *in + in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedResourceCondition. +func (in *ManagedResourceCondition) DeepCopy() *ManagedResourceCondition { + if in == nil { + return nil + } + out := new(ManagedResourceCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedResourceList) DeepCopyInto(out *ManagedResourceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ManagedResource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedResourceList. +func (in *ManagedResourceList) DeepCopy() *ManagedResourceList { + if in == nil { + return nil + } + out := new(ManagedResourceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ManagedResourceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ManagedResourceSpec) DeepCopyInto(out *ManagedResourceSpec) { + *out = *in + if in.Class != nil { + in, out := &in.Class, &out.Class + *out = new(string) + **out = **in + } + if in.SecretRefs != nil { + in, out := &in.SecretRefs, &out.SecretRefs + *out = make([]v1.LocalObjectReference, len(*in)) + copy(*out, *in) + } + if in.InjectLabels != nil { + in, out := &in.InjectLabels, &out.InjectLabels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ForceOverwriteLabels != nil { + in, out := &in.ForceOverwriteLabels, &out.ForceOverwriteLabels + *out = new(bool) + **out = **in + } + if in.ForceOverwriteAnnotations != nil { + in, out := &in.ForceOverwriteAnnotations, &out.ForceOverwriteAnnotations + *out = new(bool) + **out = **in + } + if in.KeepObjects != nil { + in, out := &in.KeepObjects, &out.KeepObjects + *out = new(bool) + **out = **in + } + if in.Equivalences != nil { + in, out := &in.Equivalences, &out.Equivalences + *out = make([][]metav1.GroupKind, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = make([]metav1.GroupKind, len(*in)) + copy(*out, *in) + } + } + } + if in.DeletePersistentVolumeClaims != nil { + in, out := &in.DeletePersistentVolumeClaims, &out.DeletePersistentVolumeClaims + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedResourceSpec. +func (in *ManagedResourceSpec) DeepCopy() *ManagedResourceSpec { + if in == nil { + return nil + } + out := new(ManagedResourceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedResourceStatus) DeepCopyInto(out *ManagedResourceStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ManagedResourceCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]ObjectReference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedResourceStatus. +func (in *ManagedResourceStatus) DeepCopy() *ManagedResourceStatus { + if in == nil { + return nil + } + out := new(ManagedResourceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectReference) DeepCopyInto(out *ObjectReference) { + *out = *in + out.ObjectReference = in.ObjectReference + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectReference. 
+func (in *ObjectReference) DeepCopy() *ObjectReference { + if in == nil { + return nil + } + out := new(ObjectReference) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/gardener/gardener/NOTICE.md b/vendor/github.com/gardener/gardener/NOTICE.md index e1fa594eb..d6e6ad838 100644 --- a/vendor/github.com/gardener/gardener/NOTICE.md +++ b/vendor/github.com/gardener/gardener/NOTICE.md @@ -25,12 +25,8 @@ The following charts of the [`charts`](charts) directory were seeded based on a * [git.k8s.io/charts/stable/nginx-ingress](https://git.k8s.io/charts/stable/nginx-ingress) * [git.k8s.io/charts/stable/prometheus](https://git.k8s.io/charts/stable/prometheus) * [git.k8s.io/charts/stable/kubernetes-dashboard](https://git.k8s.io/charts/stable/kubernetes-dashboard) -* [github.com/kubernetes/kubernetes/cluster/addons/fluentd-elasticsearch/es-service.yaml](https://github.com/kubernetes/kubernetes/blob/master/cluster/addons/fluentd-elasticsearch/es-service.yaml) -* [github.com/kubernetes/kubernetes/cluster/addons/fluentd-elasticsearch/es-statefulset.yaml](https://github.com/kubernetes/kubernetes/blob/master/cluster/addons/fluentd-elasticsearch/es-statefulset.yaml) -* [github.com/kubernetes/kubernetes/cluster/addons/fluentd-elasticsearch/fluentd-es-configmap.yaml](https://github.com/kubernetes/kubernetes/blob/master/cluster/addons/fluentd-elasticsearch/fluentd-es-configmap.yaml) -* [github.com/kubernetes/kubernetes/cluster/addons/fluentd-elasticsearch/fluentd-es-ds.yaml](https://github.com/kubernetes/kubernetes/blob/master/cluster/addons/fluentd-elasticsearch/fluentd-es-ds.yaml) -* [github.com/kubernetes/kubernetes/cluster/addons/fluentd-elasticsearch/kibana-deployment.yaml](https://github.com/kubernetes/kubernetes/blob/master/cluster/addons/fluentd-elasticsearch/kibana-deployment.yaml) -* [github.com/kubernetes/kubernetes/cluster/addons/fluentd-elasticsearch/kibana-service.yaml](https://github.com/kubernetes/kubernetes/blob/master/cluster/addons/fluentd-elasticsearch/kibana-service.yaml) +* [github.com/grafana/loki/blob/master/production/helm/loki/templates/statefulset.yaml](https://github.com/grafana/loki/blob/master/production/helm/loki/templates/statefulset.yaml) +* [github.com/grafana/loki/blob/master/production/helm/loki/templates/service.yaml](https://github.com/grafana/loki/blob/master/production/helm/loki/templates/service.yaml) ## Container Images diff --git a/vendor/github.com/gardener/gardener/hack/.ci/component_descriptor b/vendor/github.com/gardener/gardener/hack/.ci/component_descriptor index 7f62442f2..c2668225f 100755 --- a/vendor/github.com/gardener/gardener/hack/.ci/component_descriptor +++ b/vendor/github.com/gardener/gardener/hack/.ci/component_descriptor @@ -8,20 +8,14 @@ descriptor_out_file="${COMPONENT_DESCRIPTOR_PATH}" echo "enriching creating component descriptor from ${BASE_DEFINITION_PATH}" -if [[ -f "$repo_root_dir/charts/images.yaml" ]]; then - images="$(yaml2json < "$repo_root_dir/charts/images.yaml")" - eval "$(jq -r ".images | - map(select(.sourceRepository != \"$repo_name\") | - if (.name == \"hyperkube\" or .name == \"kube-apiserver\" or .name == \"kube-controller-manager\" or .name == \"kube-scheduler\" or .name == \"kube-proxy\" or .repository == \"k8s.gcr.io/hyperkube\") then - \"--generic-dependencies '{\\\"name\\\": \\\"\" + .name + \"\\\", \\\"version\\\": \\\"\" + .tag + \"\\\"}'\" - elif (.repository | startswith(\"eu.gcr.io/gardener-project/gardener\")) then - \"--component-dependencies '{\\\"name\\\": \\\"\" + .sourceRepository + \"\\\", 
\\\"version\\\": \\\"\" + .tag + \"\\\"}'\" - else - \"--container-image-dependencies '{\\\"name\\\": \\\"\" + .name + \"\\\", \\\"image_reference\\\": \\\"\" + .repository + \":\" + .tag + \"\\\", \\\"version\\\": \\\"\" + .tag + \"\\\"}'\" - end) | - \"${ADD_DEPENDENCIES_CMD} \\\\\n\" + - join(\" \\\\\n\")" <<< "$images")" -fi +# translates all images defined the images.yaml into component descriptor resources. +# For detailed documentation see https://github.com/gardener/component-cli/blob/main/docs/reference/components-cli_image-vector_add.md +# the konnectivity-server is temporary excluded until the component-descriptor for the replica-reloader is released +component-cli image-vector add --comp-desc ${BASE_DEFINITION_PATH} \ + --image-vector "$repo_root_dir/charts/images.yaml" \ + --component-prefixes eu.gcr.io/gardener-project/gardener \ + --exclude-component-reference konnectivity-server \ + --generic-dependencies hyperkube,kube-apiserver,kube-controller-manager,kube-scheduler,kube-proxy if [[ -d "$repo_root_dir/charts/" ]]; then for image_tpl_path in "$repo_root_dir/charts/"*"/templates/_images.tpl"; do diff --git a/vendor/github.com/gardener/gardener/hack/.ci/ci.go b/vendor/github.com/gardener/gardener/hack/.ci/doc.go similarity index 98% rename from vendor/github.com/gardener/gardener/hack/.ci/ci.go rename to vendor/github.com/gardener/gardener/hack/.ci/doc.go index 0ce18ea50..64493305f 100755 --- a/vendor/github.com/gardener/gardener/hack/.ci/ci.go +++ b/vendor/github.com/gardener/gardener/hack/.ci/doc.go @@ -1,5 +1,3 @@ -// +build tools - // Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file // // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/vendor/github.com/gardener/gardener/hack/.ci/prepare_release b/vendor/github.com/gardener/gardener/hack/.ci/prepare_release index 11a18aacb..882afd477 100755 --- a/vendor/github.com/gardener/gardener/hack/.ci/prepare_release +++ b/vendor/github.com/gardener/gardener/hack/.ci/prepare_release @@ -35,7 +35,7 @@ apk add --no-cache \ gcc \ sed -GOLANG_VERSION="$(sed -rn 's/FROM golang:([^ ]+).*/\1/p' < "$repo_root_dir/Dockerfile")" +GOLANG_VERSION="$(sed -rn 's/FROM (eu\.gcr\.io\/gardener-project\/3rd\/golang|golang):([^ ]+).*/\2/p' < "$repo_root_dir/Dockerfile")" export \ GOROOT="$(go env GOROOT)" \ diff --git a/vendor/github.com/gardener/gardener/hack/.ci/set_dependency_version b/vendor/github.com/gardener/gardener/hack/.ci/set_dependency_version index b595a8a7d..e0b4ceee3 100755 --- a/vendor/github.com/gardener/gardener/hack/.ci/set_dependency_version +++ b/vendor/github.com/gardener/gardener/hack/.ci/set_dependency_version @@ -108,7 +108,7 @@ elif name == 'vpn': elif name == 'external-dns-management': names = ['dns-controller-manager'] elif name == 'logging': - names = ['fluentd-es', 'curator-es'] + names = ['fluent-bit-plugin-installer', 'loki-curator'] elif name == 'etcd-custom-image': names = ['etcd'] else: diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/field_constants.go b/vendor/github.com/gardener/gardener/pkg/apis/core/field_constants.go index 59aa51e9f..a951d7750 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/core/field_constants.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/field_constants.go @@ -24,10 +24,21 @@ const ( // the Seed cluster of a core.gardener.cloud/v1beta1 BackupEntry. 
BackupEntrySeedName = "spec.seedName" + // RegistrationRefName is the field selector path for finding + // the ControllerRegistration name of a core.gardener.cloud/{v1alpha1,v1beta1} ControllerInstallation. + RegistrationRefName = "spec.registrationRef.name" + // SeedRefName is the field selector path for finding + // the Seed name of a core.gardener.cloud/{v1alpha1,v1beta1} ControllerInstallation. + SeedRefName = "spec.seedRef.name" + // ShootCloudProfileName is the field selector path for finding // the CloudProfile name of a core.gardener.cloud/{v1alpha1,v1beta1} Shoot. ShootCloudProfileName = "spec.cloudProfileName" // ShootSeedName is the field selector path for finding // the Seed cluster of a core.gardener.cloud/{v1alpha1,v1beta1} Shoot. ShootSeedName = "spec.seedName" + // ShootStatusSeedName is the field selector path for finding + // the Seed cluster of a core.gardener.cloud/{v1alpha1,v1beta1} Shoot + // referred in the status. + ShootStatusSeedName = "status.seedName" ) diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/types.go b/vendor/github.com/gardener/gardener/pkg/apis/core/types.go new file mode 100644 index 000000000..abdcad0a7 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/types.go @@ -0,0 +1,30 @@ +// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +const ( + // GardenerSeedLeaseNamespace is the namespace in which Gardenlet will report Seeds' + // status using Lease resources for each Seed + GardenerSeedLeaseNamespace = "gardener-system-seed-lease" +) + +// Object is a core object resource. +type Object interface { + metav1.Object + // GetProviderType gets the type of the provider. + GetProviderType() string +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/types_backupbucket.go b/vendor/github.com/gardener/gardener/pkg/apis/core/types_backupbucket.go index f4da73642..a8d65be0f 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/core/types_backupbucket.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/types_backupbucket.go @@ -17,6 +17,7 @@ package core import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" ) // +genclient @@ -50,7 +51,7 @@ type BackupBucketSpec struct { // Provider holds the details of cloud provider of the object store. Provider BackupBucketProvider // ProviderConfig is the configuration passed to BackupBucket resource. - ProviderConfig *ProviderConfig + ProviderConfig *runtime.RawExtension // SecretRef is a reference to a secret that contains the credentials to access object store. SecretRef corev1.SecretReference // SeedName holds the name of the seed allocated to BackupBucket for running controller.
@@ -60,7 +61,7 @@ type BackupBucketSpec struct { // BackupBucketStatus holds the most recently observed status of the Backup Bucket. type BackupBucketStatus struct { // ProviderStatus is the configuration passed to BackupBucket resource. - ProviderStatus *ProviderConfig + ProviderStatus *runtime.RawExtension // LastOperation holds information about the last operation on the BackupBucket. LastOperation *LastOperation // LastError holds information about the last occurred error during an operation. diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/types_cloudprofile.go b/vendor/github.com/gardener/gardener/pkg/apis/core/types_cloudprofile.go index 2055e33a0..184b363b5 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/core/types_cloudprofile.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/types_cloudprofile.go @@ -17,6 +17,7 @@ package core import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" ) // +genclient @@ -55,19 +56,35 @@ type CloudProfileSpec struct { // MachineTypes contains constraints regarding allowed values for machine types in the 'workers' block in the Shoot specification. MachineTypes []MachineType // ProviderConfig contains provider-specific configuration for the profile. - ProviderConfig *ProviderConfig + ProviderConfig *runtime.RawExtension // Regions contains constraints regarding allowed values for regions and zones. Regions []Region // SeedSelector contains an optional list of labels on `Seed` resources that marks those seeds whose shoots may use this provider profile. // An empty list means that all seeds of the same provider type are supported. // This is useful for environments that are of the same type (like openstack) but may have different "instances"/landscapes. - SeedSelector *metav1.LabelSelector + // Optionally a list of possible providers can be added to enable cross-provider scheduling. By default, the provider + // type of the seed must match the shoot's provider. + SeedSelector *SeedSelector // Type is the name of the provider. Type string // VolumeTypes contains constraints regarding allowed values for volume types in the 'workers' block in the Shoot specification. VolumeTypes []VolumeType } +func (c *CloudProfile) GetProviderType() string { + return c.Spec.Type +} + +// SeedSelector contains constraints for selecting seeds to be usable for shoots using a profile +type SeedSelector struct { + // LabelSelector is optional and can be used to select seeds by their label settings + *metav1.LabelSelector + // ProviderTypes contains a list of allowed provider types used by the Gardener scheduler to restrict seeds by + // their provider type and enable cross-provider scheduling. + // By default, Shoots are only scheduled on Seeds having the same provider type. + ProviderTypes []string +} + // KubernetesSettings contains constraints regarding allowed values of the 'kubernetes' block in the Shoot specification. type KubernetesSettings struct { // Versions is the list of allowed Kubernetes versions with optional expiration dates for Shoot clusters. @@ -78,8 +95,15 @@ type KubernetesSettings struct { type MachineImage struct { // Name is the name of the image.
Name string - // Versions contains versions and expiration dates of the machine image - Versions []ExpirableVersion + // Versions contains versions, expiration dates and container runtimes of the machine image + Versions []MachineImageVersion +} + +// MachineImageVersion is an expirable version with list of supported container runtimes and interfaces +type MachineImageVersion struct { + ExpirableVersion + // CRI list of supported container runtime and interfaces supported by this version + CRI []CRI } // ExpirableVersion contains a version and an expiration date. @@ -124,6 +148,10 @@ type Region struct { Name string // Zones is a list of availability zones in this region. Zones []AvailabilityZone + // Labels is an optional set of key-value pairs that contain certain administrator-controlled labels for this region. + // It can be used by Gardener administrators/operators to provide additional information about a region, e.g. wrt + // quality, reliability, access restrictions, etc. + Labels map[string]string } // AvailabilityZone is an availability zone. diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/types_common.go b/vendor/github.com/gardener/gardener/pkg/apis/core/types_common.go index ea3be15ac..abb184a3d 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/core/types_common.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/types_common.go @@ -22,14 +22,20 @@ import ( type ErrorCode string const ( - // ErrorInfraUnauthorized indicates that the last error occurred due to invalid cloud provider credentials. + // ErrorInfraUnauthorized indicates that the last error occurred due to invalid infrastructure credentials. ErrorInfraUnauthorized ErrorCode = "ERR_INFRA_UNAUTHORIZED" - // ErrorInfraInsufficientPrivileges indicates that the last error occurred due to insufficient cloud provider privileges. + // ErrorInfraInsufficientPrivileges indicates that the last error occurred due to insufficient infrastructure privileges. ErrorInfraInsufficientPrivileges ErrorCode = "ERR_INFRA_INSUFFICIENT_PRIVILEGES" - // ErrorInfraQuotaExceeded indicates that the last error occurred due to cloud provider quota limits. + // ErrorInfraQuotaExceeded indicates that the last error occurred due to infrastructure quota limits. ErrorInfraQuotaExceeded ErrorCode = "ERR_INFRA_QUOTA_EXCEEDED" - // ErrorInfraDependencies indicates that the last error occurred due to dependent objects on the cloud provider level. + // ErrorInfraDependencies indicates that the last error occurred due to dependent objects on the infrastructure level. ErrorInfraDependencies ErrorCode = "ERR_INFRA_DEPENDENCIES" + // ErrorInfraResourcesDepleted indicates that the last error occurred due to depleted resource in the infrastructure. + ErrorInfraResourcesDepleted ErrorCode = "ERR_INFRA_RESOURCES_DEPLETED" + // ErrorCleanupClusterResources indicates that the last error occurred due to resources in the cluster that are stuck in deletion. + ErrorCleanupClusterResources ErrorCode = "ERR_CLEANUP_CLUSTER_RESOURCES" + // ErrorConfigurationProblem indicates that the last error occurred due to a configuration problem. + ErrorConfigurationProblem ErrorCode = "ERR_CONFIGURATION_PROBLEM" ) // LastError indicates the last occurred error for an operation on a resource. @@ -53,6 +59,8 @@ const ( LastOperationTypeReconcile LastOperationType = "Reconcile" // LastOperationTypeDelete indicates a 'delete' operation. LastOperationTypeDelete LastOperationType = "Delete" + // LastOperationTypeRestore indicates a 'restore' operation. 
+ LastOperationTypeRestore LastOperationType = "Restore" ) // LastOperationState is a string alias. diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/types_controllerinstallation.go b/vendor/github.com/gardener/gardener/pkg/apis/core/types_controllerinstallation.go index bfb233580..1c9b5d909 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/core/types_controllerinstallation.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/types_controllerinstallation.go @@ -17,6 +17,7 @@ package core import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" ) // +genclient @@ -59,7 +60,7 @@ type ControllerInstallationStatus struct { Conditions []Condition // ProviderStatus contains type-specific status. // +optional - ProviderStatus *ProviderConfig + ProviderStatus *runtime.RawExtension } const ( @@ -69,4 +70,7 @@ const ( ControllerInstallationInstalled ConditionType = "Installed" // ControllerInstallationValid is a condition type for indicating whether the installation request is valid. ControllerInstallationValid ConditionType = "Valid" + // ControllerInstallationRequired is a condition type for indicating that the respective extension controller is + // still required on the seed cluster as corresponding extension resources still exist. + ControllerInstallationRequired ConditionType = "Required" ) diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/types_controllerregistration.go b/vendor/github.com/gardener/gardener/pkg/apis/core/types_controllerregistration.go index 403d96f18..4a1408735 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/core/types_controllerregistration.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/types_controllerregistration.go @@ -16,6 +16,7 @@ package core import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" ) // +genclient @@ -62,6 +63,10 @@ type ControllerResource struct { GloballyEnabled *bool // ReconcileTimeout defines how long Gardener should wait for the resource reconciliation. ReconcileTimeout *metav1.Duration + // Primary determines if the controller backed by this ControllerRegistration is responsible for the extension + // resource's lifecycle. This field defaults to true. There must be exactly one primary controller for this kind/type + // combination. + Primary *bool } // ControllerDeployment contains information for how this controller is deployed. @@ -69,5 +74,26 @@ type ControllerDeployment struct { // Type is the deployment type. Type string // ProviderConfig contains type-specific configuration. - ProviderConfig *ProviderConfig + ProviderConfig *runtime.RawExtension + // Policy controls how the controller is deployed. It defaults to 'OnDemand'. + Policy *ControllerDeploymentPolicy + // SeedSelector contains an optional label selector for seeds. Only if the labels match then this controller will be + // considered for a deployment. + // An empty list means that all seeds are selected. + SeedSelector *metav1.LabelSelector } + +// ControllerDeploymentPolicy is a string alias. +type ControllerDeploymentPolicy string + +const ( + // ControllerDeploymentPolicyOnDemand specifies that the controller shall be only deployed if required by another + // resource. If nothing requires it then the controller shall not be deployed. 
+ ControllerDeploymentPolicyOnDemand ControllerDeploymentPolicy = "OnDemand" + // ControllerDeploymentPolicyAlways specifies that the controller shall be deployed always, independent of whether + // another resource requires it or the respective seed has shoots. + ControllerDeploymentPolicyAlways ControllerDeploymentPolicy = "Always" + // ControllerDeploymentPolicyAlwaysExceptNoShoots specifies that the controller shall be deployed always, independent of + // whether another resource requires it, but only when the respective seed has at least one shoot. + ControllerDeploymentPolicyAlwaysExceptNoShoots ControllerDeploymentPolicy = "AlwaysExceptNoShoots" +) diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/types_project.go b/vendor/github.com/gardener/gardener/pkg/apis/core/types_project.go index c5be18dcc..5593986b5 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/core/types_project.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/types_project.go @@ -63,6 +63,8 @@ type ProjectSpec struct { // Namespace is the name of the namespace that has been created for the Project object. // A nil value means that Gardener will determine the name of the namespace. Namespace *string + // Tolerations contains the default tolerations and a list for allowed taints on seed clusters. + Tolerations *ProjectTolerations } // ProjectStatus holds the most recently observed status of the project. @@ -71,6 +73,11 @@ type ProjectStatus struct { ObservedGeneration int64 // Phase is the current phase of the project. Phase ProjectPhase + // StaleSinceTimestamp contains the timestamp when the project was first discovered to be stale/unused. + StaleSinceTimestamp *metav1.Time + // StaleAutoDeleteTimestamp contains the timestamp when the project will be garbage-collected/automatically deleted + // because it's stale/unused. + StaleAutoDeleteTimestamp *metav1.Time } // ProjectMember is a member of a project. @@ -82,6 +89,23 @@ type ProjectMember struct { Roles []string } +// ProjectTolerations contains the tolerations for taints on seed clusters. +type ProjectTolerations struct { + // Defaults contains a list of tolerations that are added to the shoots in this project by default. + Defaults []Toleration + // Whitelist contains a list of tolerations that are allowed to be added to the shoots in this project. Please note + // that this list may only be added by users having the `spec-tolerations-whitelist` verb for project resources. + Whitelist []Toleration +} + +// Toleration is a toleration for a seed taint. +type Toleration struct { + // Key is the toleration key to be applied to a project or shoot. + Key string + // Value is the toleration value corresponding to the toleration key. + Value *string +} + const ( // ProjectMemberAdmin is a const for a role that provides full admin access. ProjectMemberAdmin = "admin" @@ -89,7 +113,8 @@ const ( ProjectMemberOwner = "owner" // ProjectMemberViewer is a const for a role that provides limited permissions to only view some resources. ProjectMemberViewer = "viewer" - + // ProjectMemberUserAccessManager is a const for a role that provides permissions to manage human user(s) and group(s). + ProjectMemberUserAccessManager = "uam" // ProjectMemberExtensionPrefix is a prefix for custom roles that are not known by Gardener.
ProjectMemberExtensionPrefix = "extension:" ) @@ -111,6 +136,8 @@ const ( ProjectEventNamespaceReconcileFailed = "NamespaceReconcileFailed" // ProjectEventNamespaceReconcileSuccessful indicates that the namespace reconciliation has succeeded. ProjectEventNamespaceReconcileSuccessful = "NamespaceReconcileSuccessful" + // ProjectEventNamespaceNotEmpty indicates that the namespace cannot be released because it is not empty. + ProjectEventNamespaceNotEmpty = "NamespaceNotEmpty" // ProjectEventNamespaceDeletionFailed indicates that the namespace deletion failed. ProjectEventNamespaceDeletionFailed = "NamespaceDeletionFailed" // ProjectEventNamespaceMarkedForDeletion indicates that the namespace has been successfully marked for deletion. diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/types_seed.go b/vendor/github.com/gardener/gardener/pkg/apis/core/types_seed.go index 17884e556..628eea28b 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/core/types_seed.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/types_seed.go @@ -18,6 +18,7 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" ) // +genclient @@ -46,6 +47,14 @@ type SeedList struct { Items []Seed } +// SeedTemplate is a template for creating a Seed object. +type SeedTemplate struct { + // Standard object metadata. + metav1.ObjectMeta + // Specification of the desired behavior of the Seed. + Spec SeedSpec +} + // SeedSpec is the specification of a Seed. type SeedSpec struct { // Backup holds the object store configuration for the backups of shoot (currently only etcd). @@ -62,10 +71,18 @@ type SeedSpec struct { // SecretRef is a reference to a Secret object containing the Kubeconfig and the cloud provider credentials for // the account the Seed cluster has been deployed to. SecretRef *corev1.SecretReference + // Settings contains certain settings for this seed cluster. + Settings *SeedSettings // Taints describes taints on the seed. Taints []SeedTaint // Volume contains settings for persistentvolumes created in the seed cluster. Volume *SeedVolume + // Ingress configures Ingress specific settings of the Seed cluster. + Ingress *Ingress +} + +func (s *Seed) GetProviderType() string { + return s.Spec.Provider.Type } // SeedStatus is the status of a Seed. @@ -79,6 +96,13 @@ type SeedStatus struct { // ObservedGeneration is the most recent generation observed for this Seed. It corresponds to the // Seed's generation, which is updated on mutation by the API Server. ObservedGeneration int64 + // ClusterIdentity is the identity of Seed cluster + ClusterIdentity *string + // Capacity represents the total resources of a seed. + Capacity corev1.ResourceList + // Allocatable represents the resources of a seed that are available for scheduling. + // Defaults to Capacity. + Allocatable corev1.ResourceList } // SeedBackup contains the object store configuration for backups for shoot (currently only etcd). @@ -86,7 +110,7 @@ type SeedBackup struct { // Provider is a provider name. Provider string // ProviderConfig is the configuration passed to BackupBucket resource. - ProviderConfig *ProviderConfig + ProviderConfig *runtime.RawExtension // Region is a region name. 
Region *string // SecretRef is a reference to a Secret object containing the cloud provider credentials for @@ -95,11 +119,43 @@ type SeedBackup struct { SecretRef corev1.SecretReference } -// SeedDNS contains DNS-relevant information about this seed cluster. +// SeedDNS contains the external domain and configuration for the DNS provider type SeedDNS struct { // IngressDomain is the domain of the Seed cluster pointing to the ingress controller endpoint. It will be used - // to construct ingress URLs for system applications running in Shoot clusters. - IngressDomain string + // to construct ingress URLs for system applications running in Shoot clusters. Once set this field is immutable. + // This will be removed in the next API version and replaced by spec.ingress.domain. + IngressDomain *string + // Provider configures a DNSProvider + Provider *SeedDNSProvider +} + +// SeedDNSProvider configures a DNS provider +type SeedDNSProvider struct { + // Type describes the type of the dns-provider, for example `aws-route53` + Type string + // SecretRef is a reference to a Secret object containing cloud provider credentials used for registering external domains. + SecretRef corev1.SecretReference + // Domains contains information about which domains shall be included/excluded for this provider. + Domains *DNSIncludeExclude + // Zones contains information about which hosted zones shall be included/excluded for this provider. + Zones *DNSIncludeExclude +} + +// Ingress configures the Ingress specific settings of the Seed cluster +type Ingress struct { + // Domain specifies the ingress domain of the Seed cluster pointing to the ingress controller endpoint. It will be used + // to construct ingress URLs for system applications running in Shoot clusters. Once set this field is immutable. + Domain string + // Controller configures a Gardener managed Ingress Controller listening on the ingressDomain + Controller IngressController +} + +// IngressController enables a Gardener managed Ingress Controller listening on the ingressDomain +type IngressController struct { + // Kind defines which kind of IngressController to use, for example `nginx` + Kind string + // ProviderConfig specifies infrastructure specific configuration for the ingressController + ProviderConfig *runtime.RawExtension } // SeedNetworks contains CIDRs for the pod, service and node networks of a Kubernetes cluster. @@ -130,11 +186,65 @@ type SeedProvider struct { // Type is the name of the provider. Type string // ProviderConfig is the configuration passed to Seed resource. - ProviderConfig *ProviderConfig + ProviderConfig *runtime.RawExtension // Region is a name of a region. Region string } +// SeedSettings contains certain settings for this seed cluster. +type SeedSettings struct { + // ExcessCapacityReservation controls the excess capacity reservation for shoot control planes in the seed. + ExcessCapacityReservation *SeedSettingExcessCapacityReservation + // Scheduling controls settings for scheduling decisions for the seed. + Scheduling *SeedSettingScheduling + // ShootDNS controls the shoot DNS settings for the seed. + ShootDNS *SeedSettingShootDNS + // LoadBalancerServices controls certain settings for services of type load balancer that are created in the + // seed. + LoadBalancerServices *SeedSettingLoadBalancerServices + // VerticalPodAutoscaler controls certain settings for the vertical pod autoscaler components deployed in the seed. 
+ VerticalPodAutoscaler *SeedSettingVerticalPodAutoscaler +} + +// SeedSettingExcessCapacityReservation controls the excess capacity reservation for shoot control planes in the +// seed. When enabled then this is done via PodPriority and requires the Seed cluster to have Kubernetes version 1.11 +// or the PodPriority feature gate as well as the scheduling.k8s.io/v1alpha1 API group enabled. +type SeedSettingExcessCapacityReservation struct { + // Enabled controls whether the excess capacity reservation should be enabled. + Enabled bool +} + +// SeedSettingShootDNS controls the shoot DNS settings for the seed. +type SeedSettingShootDNS struct { + // Enabled controls whether the DNS for shoot clusters should be enabled. When disabled then all shoots using the + // seed won't get any DNS providers, DNS records, and no DNS extension controller is required to be installed here. + // This is useful for environments where DNS is not required. + Enabled bool +} + +// SeedSettingScheduling controls settings for scheduling decisions for the seed. +type SeedSettingScheduling struct { + // Visible controls whether the gardener-scheduler shall consider this seed when scheduling shoots. Invisible seeds + // are not considered by the scheduler. + Visible bool +} + +// SeedSettingLoadBalancerServices controls certain settings for services of type load balancer that are created in the +// seed. +type SeedSettingLoadBalancerServices struct { + // Annotations is a map of annotations that will be injected/merged into every load balancer service object. + Annotations map[string]string +} + +// SeedSettingVerticalPodAutoscaler controls certain settings for the vertical pod autoscaler components deployed in the +// seed. +type SeedSettingVerticalPodAutoscaler struct { + // Enabled controls whether the VPA components shall be deployed into the garden namespace in the seed cluster. It + // is enabled by default because Gardener heavily relies on a VPA being deployed. You should only disable this if + // your seed cluster already has another, manually/custom managed VPA deployment. + Enabled bool +} + // SeedTaint describes a taint on a seed. type SeedTaint struct { // Key is the taint key to be applied to a seed. @@ -144,20 +254,9 @@ type SeedTaint struct { } const ( - // SeedTaintDisableCapacityReservation is a constant for a taint key on a seed that marks it for disabling - // excess capacity reservation. This can be useful for seed clusters which only host shooted seeds to reduce - // costs. - SeedTaintDisableCapacityReservation = "seed.gardener.cloud/disable-capacity-reservation" - // SeedTaintDisableDNS is a constant for a taint key on a seed that marks it for disabling DNS. All shoots - // using this seed won't get any DNS providers, DNS records, and no DNS extension controller is required to - // be installed here. This is useful for environment where DNS is not required. - SeedTaintDisableDNS = "seed.gardener.cloud/disable-dns" // SeedTaintProtected is a constant for a taint key on a seed that marks it as protected. Protected seeds // may only be used by shoots in the `garden` namespace. SeedTaintProtected = "seed.gardener.cloud/protected" - // SeedTaintInvisible is a constant for a taint key on a seed that marks it as invisible. Invisible seeds - // are not considered by the gardener-scheduler. - SeedTaintInvisible = "seed.gardener.cloud/invisible" ) // SeedVolume contains settings for persistentvolumes created in the seed cluster. 
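Illustrative sketch, not part of the vendored diff: the seed taint keys removed in the hunk above (disable-capacity-reservation, disable-dns, invisible) are superseded by the SeedSettings fields added in the same file. A minimal Go sketch, assuming only the internal core types declared here, of expressing the same intent through spec.settings; the field values are hypothetical.

package main

import (
	"fmt"

	"github.com/gardener/gardener/pkg/apis/core"
)

func main() {
	// A seed that previously carried the disable-capacity-reservation,
	// disable-dns and invisible taints would now set the corresponding
	// settings to false instead.
	seed := core.Seed{
		Spec: core.SeedSpec{
			Settings: &core.SeedSettings{
				ExcessCapacityReservation: &core.SeedSettingExcessCapacityReservation{Enabled: false},
				ShootDNS:                  &core.SeedSettingShootDNS{Enabled: false},
				Scheduling:                &core.SeedSettingScheduling{Visible: false},
			},
		},
	}
	fmt.Printf("visible to scheduler: %v\n", seed.Spec.Settings.Scheduling.Visible)
}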
@@ -185,3 +284,9 @@ const ( // SeedGardenletReady is a constant for a condition type indicating that the Gardenlet is ready. SeedGardenletReady ConditionType = "GardenletReady" ) + +// Resource constants for Gardener object types +const ( + // ResourceShoots is a resource constant for the number of shoots. + ResourceShoots corev1.ResourceName = "shoots" +) diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/types_shoot.go b/vendor/github.com/gardener/gardener/pkg/apis/core/types_shoot.go index ac4edfced..b3f2b6dd4 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/core/types_shoot.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/types_shoot.go @@ -17,9 +17,11 @@ package core import ( "time" + autoscalingv1 "k8s.io/api/autoscaling/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" ) @@ -80,6 +82,16 @@ type ShootSpec struct { SecretBindingName string // SeedName is the name of the seed cluster that runs the control plane of the Shoot. SeedName *string + // SeedSelector is an optional selector which must match a seed's labels for the shoot to be scheduled on that seed. + SeedSelector *SeedSelector + // Resources holds a list of named resource references that can be referred to in extension configs by their names. + Resources []NamedResourceReference + // Tolerations contains the tolerations for taints on seed clusters. + Tolerations []Toleration +} + +func (s *Shoot) GetProviderType() string { + return s.Spec.Provider.Type } // ShootStatus holds the most recently observed status of the Shoot cluster. @@ -111,6 +123,8 @@ type ShootStatus struct { // UID is a unique identifier for the Shoot cluster to avoid portability between Kubernetes clusters. // It is used to compute unique hashes. UID types.UID + // ClusterIdentity is the identity of the Shoot cluster + ClusterIdentity *string } ////////////////////////////////////////////////////////////////////////////////////////////////// @@ -148,7 +162,7 @@ const ( // NginxIngress describes configuration values for the nginx-ingress addon. type NginxIngress struct { Addon - // LoadBalancerSourceRanges is list of whitelist IP sources for NginxIngress + // LoadBalancerSourceRanges is list of allowed IP sources for NginxIngress LoadBalancerSourceRanges []string // Config contains custom configuration for the nginx-ingress-controller configuration. // See https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/configmap.md#configuration-options @@ -165,7 +179,7 @@ type NginxIngress struct { // DNS holds information about the provider, the hosted zone id and the domain. type DNS struct { // Domain is the external available domain of the Shoot cluster. This domain will be written into the - // kubeconfig that is handed out to end-users. + // kubeconfig that is handed out to end-users. Once set it is immutable. Domain *string // Providers is a list of DNS providers that shall be enabled for this shoot cluster. Only relevant if // not a default domain is used. @@ -209,7 +223,21 @@ type Extension struct { // Type is the type of the extension resource. Type string // ProviderConfig is the configuration passed to extension resource. - ProviderConfig *ProviderConfig + ProviderConfig *runtime.RawExtension + // Disabled allows to disable extensions that were marked as 'globally enabled' by Gardener administrators. 
+ Disabled *bool +} + +////////////////////////////////////////////////////////////////////////////////////////////////// +// NamedResourceReference relevant types // +////////////////////////////////////////////////////////////////////////////////////////////////// + +// NamedResourceReference is a named reference to a resource. +type NamedResourceReference struct { + // Name of the resource reference. + Name string + // ResourceRef is a reference to a resource. + ResourceRef autoscalingv1.CrossVersionObjectReference } ////////////////////////////////////////////////////////////////////////////////////////////////// @@ -245,7 +273,7 @@ type HibernationSchedule struct { type Kubernetes struct { // AllowPrivilegedContainers indicates whether privileged containers are allowed in the Shoot (default: true). AllowPrivilegedContainers *bool - // ClusterAutoscaler contains the configration flags for the Kubernetes cluster autoscaler. + // ClusterAutoscaler contains the configuration flags for the Kubernetes cluster autoscaler. ClusterAutoscaler *ClusterAutoscaler // KubeAPIServer contains configuration settings for the kube-apiserver. KubeAPIServer *KubeAPIServerConfig @@ -259,9 +287,11 @@ type Kubernetes struct { Kubelet *KubeletConfig // Version is the semantic Kubernetes version to use for the Shoot cluster. Version string + // VerticalPodAutoscaler contains the configuration flags for the Kubernetes vertical pod autoscaler. + VerticalPodAutoscaler *VerticalPodAutoscaler } -// ClusterAutoscaler contains the configration flags for the Kubernetes cluster autoscaler. +// ClusterAutoscaler contains the configuration flags for the Kubernetes cluster autoscaler. type ClusterAutoscaler struct { // ScaleDownDelayAfterAdd defines how long after scale up that scale down evaluation resumes (default: 1 hour). ScaleDownDelayAfterAdd *metav1.Duration @@ -277,6 +307,30 @@ type ClusterAutoscaler struct { ScanInterval *metav1.Duration } +// VerticalPodAutoscaler contains the configuration flags for the Kubernetes vertical pod autoscaler. +type VerticalPodAutoscaler struct { + // Enabled specifies whether the Kubernetes VPA shall be enabled for the shoot cluster. + Enabled bool + // EvictAfterOOMThreshold defines the threshold that will lead to pod eviction in case it OOMed in less than the given + // threshold since its start and if it has only one container (default: 10m0s). + EvictAfterOOMThreshold *metav1.Duration + // EvictionRateBurst defines the burst of pods that can be evicted (default: 1) + EvictionRateBurst *int32 + // EvictionRateLimit defines the number of pods that can be evicted per second. A rate limit set to 0 or -1 will + // disable the rate limiter (default: -1). + EvictionRateLimit *float64 + // EvictionTolerance defines the fraction of replica count that can be evicted for update in case more than one + // pod can be evicted (default: 0.5). + EvictionTolerance *float64 + // RecommendationMarginFraction is the fraction of usage added as the safety margin to the recommended request + // (default: 0.15). + RecommendationMarginFraction *float64 + // UpdaterInterval is the interval how often the updater should run (default: 1m0s). + UpdaterInterval *metav1.Duration + // RecommenderInterval is the interval how often metrics should be fetched (default: 1m0s). + RecommenderInterval *metav1.Duration +} + // KubernetesConfig contains common configuration fields for the control plane components. type KubernetesConfig struct { // FeatureGates contains information about enabled feature gates. 
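Reading aid, not part of the patch: the VerticalPodAutoscaler shoot field added above documents its defaults in the field comments (10m0s, 1, -1, 0.5, 0.15, 1m0s, 1m0s). A hedged sketch, assuming the internal core types from this hunk, that spells those documented defaults out explicitly.

package main

import (
	"fmt"
	"time"

	"github.com/gardener/gardener/pkg/apis/core"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	var (
		evictionRateBurst            int32   = 1
		evictionRateLimit            float64 = -1
		evictionTolerance                    = 0.5
		recommendationMarginFraction         = 0.15
	)

	// Values mirror the defaults stated in the field comments above.
	vpa := core.VerticalPodAutoscaler{
		Enabled:                      true,
		EvictAfterOOMThreshold:       &metav1.Duration{Duration: 10 * time.Minute},
		EvictionRateBurst:            &evictionRateBurst,
		EvictionRateLimit:            &evictionRateLimit,
		EvictionTolerance:            &evictionTolerance,
		RecommendationMarginFraction: &recommendationMarginFraction,
		UpdaterInterval:              &metav1.Duration{Duration: time.Minute},
		RecommenderInterval:          &metav1.Duration{Duration: time.Minute},
	}
	fmt.Printf("VPA enabled: %v\n", vpa.Enabled)
}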
@@ -304,6 +358,26 @@ type KubeAPIServerConfig struct { // ServiceAccountConfig contains configuration settings for the service account handling // of the kube-apiserver. ServiceAccountConfig *ServiceAccountConfig + // WatchCacheSizes contains configuration of the API server's watch cache sizes. + // Configuring these flags might be useful for large-scale Shoot clusters with a lot of parallel update requests + // and a lot of watching controllers (e.g. large shooted Seed clusters). When the API server's watch cache's + // capacity is too small to cope with the amount of update requests and watchers for a particular resource, it + // might happen that controller watches are permanently stopped with `too old resource version` errors. + // Starting from kubernetes v1.19, the API server's watch cache size is adapted dynamically and setting the watch + // cache size flags will have no effect, except when setting it to 0 (which disables the watch cache). + WatchCacheSizes *WatchCacheSizes + // Requests contains configuration for request-specific settings for the kube-apiserver. + Requests *KubeAPIServerRequests +} + +// KubeAPIServerRequests contains configuration for request-specific settings for the kube-apiserver. +type KubeAPIServerRequests struct { + // MaxNonMutatingInflight is the maximum number of non-mutating requests in flight at a given time. When the server + // exceeds this, it rejects requests. + MaxNonMutatingInflight *int32 + // MaxMutatingInflight is the maximum number of mutating requests in flight at a given time. When the server + // exceeds this, it rejects requests. + MaxMutatingInflight *int32 } // ServiceAccountConfig is the kube-apiserver configuration for service accounts. @@ -371,7 +445,31 @@ type AdmissionPlugin struct { // Name is the name of the plugin. Name string // Config is the configuration of the plugin. - Config *ProviderConfig + Config *runtime.RawExtension +} + +// WatchCacheSizes contains configuration of the API server's watch cache sizes. +type WatchCacheSizes struct { + // Default configures the default watch cache size of the kube-apiserver + // (flag `--default-watch-cache-size`, defaults to 100). + // See: https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/ + Default *int32 + // Resources configures the watch cache size of the kube-apiserver per resource + // (flag `--watch-cache-sizes`). + // See: https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/ + Resources []ResourceWatchCacheSize +} + +// ResourceWatchCacheSize contains configuration of the API server's watch cache size for one specific resource. +type ResourceWatchCacheSize struct { + // APIGroup is the API group of the resource for which the watch cache size should be configured. + // An unset value is used to specify the legacy core API (e.g. for `secrets`). + APIGroup *string + // Resource is the name of the resource for which the watch cache size should be configured + // (in lowercase plural form, e.g. `secrets`). + Resource string + // CacheSize specifies the watch cache size that should be configured for the specified resource. + CacheSize int32 } // KubeControllerManagerConfig contains configuration settings for the kube-controller-manager. 
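Illustrative sketch, not part of the vendored diff: the hunk above adds watch-cache and in-flight request tuning to KubeAPIServerConfig. A minimal Go sketch, assuming the internal core types declared here; the resource names and cache/in-flight numbers are made-up example values.

package main

import (
	"fmt"

	"github.com/gardener/gardener/pkg/apis/core"
	"k8s.io/utils/pointer"
)

func main() {
	// Sized-up watch caches and in-flight request limits for a large shoot,
	// using the new KubeAPIServerConfig fields from this hunk.
	apiServer := core.KubeAPIServerConfig{
		WatchCacheSizes: &core.WatchCacheSizes{
			Default: pointer.Int32Ptr(100),
			Resources: []core.ResourceWatchCacheSize{
				// Legacy core API group: APIGroup stays nil.
				{Resource: "secrets", CacheSize: 500},
				{APIGroup: pointer.StringPtr("apps"), Resource: "deployments", CacheSize: 500},
			},
		},
		Requests: &core.KubeAPIServerRequests{
			MaxNonMutatingInflight: pointer.Int32Ptr(800),
			MaxMutatingInflight:    pointer.Int32Ptr(400),
		},
	}
	fmt.Printf("default watch cache size: %d\n", *apiServer.WatchCacheSizes.Default)
}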
@@ -381,6 +479,8 @@ type KubeControllerManagerConfig struct { HorizontalPodAutoscalerConfig *HorizontalPodAutoscalerConfig // NodeCIDRMaskSize defines the mask size for node cidr in cluster (default is 24) NodeCIDRMaskSize *int32 + // PodEvictionTimeout defines the grace period for deleting pods on failed nodes. + PodEvictionTimeout *metav1.Duration } // HorizontalPodAutoscalerConfig contains horizontal pod autoscaler configuration settings for the kube-controller-manager. @@ -422,6 +522,11 @@ const ( // KubeSchedulerConfig contains configuration settings for the kube-scheduler. type KubeSchedulerConfig struct { KubernetesConfig + // KubeMaxPDVols allows to configure the `KUBE_MAX_PD_VOLS` environment variable for the kube-scheduler. + // Please find more information here: https://kubernetes.io/docs/concepts/storage/storage-limits/#custom-limits + // Note that using this field is considered alpha-/experimental-level and is on your own risk. You should be aware + // of all the side-effects and consequences when changing it. + KubeMaxPDVols *string } // KubeProxyConfig contains configuration settings for the kube-proxy. @@ -495,6 +600,15 @@ type KubeletConfig struct { // ImagePullProgressDeadline describes the time limit under which if no pulling progress is made, the image pulling will be cancelled. // Default: 1m ImagePullProgressDeadline *metav1.Duration + // FailSwapOn makes the Kubelet fail to start if swap is enabled on the node. (default true). + FailSwapOn *bool + // KubeReserved is the configuration for resources reserved for kubernetes node components (mainly kubelet and container runtime). + // When updating these values, be aware that cgroup resizes may not succeed on active worker nodes. Look for the NodeAllocatableEnforced event to determine if the configuration was applied. + // Default: cpu=80m,memory=1Gi,pid=20k + KubeReserved *KubeletConfigReserved + // SystemReserved is the configuration for resources reserved for system processes not managed by kubernetes (e.g. journald). + // When updating these values, be aware that cgroup resizes may not succeed on active worker nodes. Look for the NodeAllocatableEnforced event to determine if the configuration was applied. + SystemReserved *KubeletConfigReserved } // KubeletConfigEviction contains kubelet eviction thresholds supporting either a resource.Quantity or a percentage based value. @@ -539,6 +653,19 @@ type KubeletConfigEvictionSoftGracePeriod struct { NodeFSInodesFree *metav1.Duration } +// KubeletConfigReserved contains reserved resources for daemons +type KubeletConfigReserved struct { + // CPU is the reserved cpu. + CPU *resource.Quantity + // Memory is the reserved memory. + Memory *resource.Quantity + // EphemeralStorage is the reserved ephemeral-storage. + EphemeralStorage *resource.Quantity + // PID is the reserved process-ids. + // To reserve PID, the SupportNodePidsLimit feature gate must be enabled in Kubernetes versions < 1.15. + PID *resource.Quantity +} + ////////////////////////////////////////////////////////////////////////////////////////////////// // Networking relevant types // ////////////////////////////////////////////////////////////////////////////////////////////////// @@ -548,7 +675,7 @@ type Networking struct { // Type identifies the type of the networking plugin. Type string // ProviderConfig is the configuration passed to network resource. - ProviderConfig *ProviderConfig + ProviderConfig *runtime.RawExtension // Pods is the CIDR of the pod network. 
Pods *string // Nodes is the CIDR of the entire node network. @@ -568,6 +695,13 @@ const ( // Maintenance relevant types // ////////////////////////////////////////////////////////////////////////////////////////////////// +const ( + // MaintenanceTimeWindowDurationMinimum is the minimum duration for a maintenance time window. + MaintenanceTimeWindowDurationMinimum = 30 * time.Minute + // MaintenanceTimeWindowDurationMaximum is the maximum duration for a maintenance time window. + MaintenanceTimeWindowDurationMaximum = 6 * time.Hour +) + // Maintenance contains information about the time window for maintenance operations and which // operations should be performed. type Maintenance struct { @@ -575,6 +709,10 @@ type Maintenance struct { AutoUpdate *MaintenanceAutoUpdate // TimeWindow contains information about the time window for maintenance operations. TimeWindow *MaintenanceTimeWindow + // ConfineSpecUpdateRollout prevents that changes/updates to the shoot specification will be rolled out immediately. + // Instead, they are rolled out during the shoot's maintenance time window. There is one exception that will trigger + // an immediate roll out which is changes to the Spec.Hibernation.Enabled field. + ConfineSpecUpdateRollout *bool } // MaintenanceAutoUpdate contains information about which constraints should be automatically updated. @@ -622,10 +760,10 @@ type Provider struct { Type string // ControlPlaneConfig contains the provider-specific control plane config blob. Please look up the concrete // definition in the documentation of your provider extension. - ControlPlaneConfig *ProviderConfig + ControlPlaneConfig *runtime.RawExtension // InfrastructureConfig contains the provider-specific infrastructure config blob. Please look up the concrete // definition in the documentation of your provider extension. - InfrastructureConfig *ProviderConfig + InfrastructureConfig *runtime.RawExtension // Workers is a list of worker groups. Workers []Worker } @@ -655,18 +793,42 @@ type Worker struct { // MaxUnavailable is the maximum number of VMs that can be unavailable during an update. MaxUnavailable *intstr.IntOrString // ProviderConfig is the provider-specific configuration for this worker pool. - ProviderConfig *ProviderConfig + ProviderConfig *runtime.RawExtension + // SystemComponents contains configuration for system components related to this worker pool + SystemComponents *WorkerSystemComponents // Taints is a list of taints for all the `Node` objects in this worker pool. Taints []corev1.Taint // Volume contains information about the volume type and size. Volume *Volume // DataVolumes contains a list of additional worker volumes. - DataVolumes []Volume + DataVolumes []DataVolume // KubeletDataVolumeName contains the name of a dataVolume that should be used for storing kubelet state. KubeletDataVolumeName *string // Zones is a list of availability zones that are used to evenly distribute this worker pool. Optional // as not every provider may support availability zones. Zones []string + // MachineControllerManagerSettings contains configurations for different worker-pools. Eg. MachineDrainTimeout, MachineHealthTimeout. + MachineControllerManagerSettings *MachineControllerManagerSettings +} + +// MachineControllerManagerSettings contains configurations for different worker-pools. Eg. MachineDrainTimeout, MachineHealthTimeout. +type MachineControllerManagerSettings struct { + // MachineDrainTimeout is the period after which machine is forcefully deleted. 
+ MachineDrainTimeout *metav1.Duration + // MachineHealthTimeout is the period after which machine is declared failed. + MachineHealthTimeout *metav1.Duration + // MachineCreationTimeout is the period after which creation of the machine is declared failed. + MachineCreationTimeout *metav1.Duration + // MaxEvictRetries are the number of eviction retries on a pod after which drain is declared failed, and forceful deletion is triggered. + MaxEvictRetries *int32 + // NodeConditions are the set of conditions if set to true for the period of MachineHealthTimeout, machine will be declared failed. + NodeConditions []string +} + +// WorkerSystemComponents contains configuration for system components related to this worker pool +type WorkerSystemComponents struct { + // Allow determines whether the pool should be allowed to host system components or not (defaults to true) + Allow bool } // WorkerKubernetes contains configuration for Kubernetes components related to this worker pool. @@ -690,7 +852,7 @@ type ShootMachineImage struct { // Name is the name of the image. Name string // ProviderConfig is the shoot's individual configuration passed to an extension resource. - ProviderConfig *ProviderConfig + ProviderConfig *runtime.RawExtension // Version is the version of the shoot's image. // If version is not provided, it will be defaulted to the latest version from the CloudProfile. Version string @@ -708,6 +870,18 @@ type Volume struct { Encrypted *bool } +// DataVolume contains information about a data volume. +type DataVolume struct { + // Name of the volume to make it referencable. + Name string + // Type is the type of the volume. + Type *string + // VolumeSize is the size of the volume. + VolumeSize string + // Encrypted determines if the volume should be encrypted. + Encrypted *bool +} + // CRI contains information about the Container Runtimes. type CRI struct { // The name of the CRI library @@ -728,7 +902,7 @@ type ContainerRuntime struct { // Type is the type of the Container Runtime. Type string // ProviderConfig is the configuration passed to the ContainerRuntime resource. - ProviderConfig *ProviderConfig + ProviderConfig *runtime.RawExtension } var ( @@ -743,11 +917,14 @@ var ( ////////////////////////////////////////////////////////////////////////////////////////////////// const ( - // ShootEventMaintenanceDone indicates that a maintenance operation has been performed. - ShootEventMaintenanceDone = "MaintenanceDone" - // ShootEventMaintenanceError indicates that a maintenance operation has failed. - ShootEventMaintenanceError = "MaintenanceError" - + // ShootEventImageVersionMaintenance indicates that a maintenance operation regarding the image version has been performed. + ShootEventImageVersionMaintenance = "MachineImageVersionMaintenance" + // ShootEventK8sVersionMaintenance indicates that a maintenance operation regarding the K8s version has been performed. + ShootEventK8sVersionMaintenance = "KubernetesVersionMaintenance" + // ShootEventHibernationEnabled indicates that hibernation started. + ShootEventHibernationEnabled = "Hibernated" + // ShootEventHibernationDisabled indicates that hibernation ended. + ShootEventHibernationDisabled = "WokenUp" // ShootEventSchedulingSuccessful indicates that a scheduling decision was taken successfully. ShootEventSchedulingSuccessful = "SchedulingSuccessful" // ShootEventSchedulingFailed indicates that a scheduling decision failed. 
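Illustrative sketch, not part of the vendored diff: the hunks above change Worker.DataVolumes from []Volume to the new named DataVolume type and add per-pool MachineControllerManagerSettings. A minimal Go sketch, assuming the internal core types declared here; the volume name, type, size, timeouts, and node condition names are illustrative values only.

package main

import (
	"fmt"
	"time"

	"github.com/gardener/gardener/pkg/apis/core"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/utils/pointer"
)

func main() {
	volumeType := "gp2"

	// DataVolumes now carry a mandatory Name so they can be referenced (e.g. by
	// KubeletDataVolumeName), and machine-controller-manager timeouts can be
	// tuned per worker pool.
	worker := core.Worker{
		SystemComponents: &core.WorkerSystemComponents{Allow: true},
		DataVolumes: []core.DataVolume{
			{Name: "kubelet-state", Type: &volumeType, VolumeSize: "20Gi"},
		},
		KubeletDataVolumeName: pointer.StringPtr("kubelet-state"),
		MachineControllerManagerSettings: &core.MachineControllerManagerSettings{
			MachineDrainTimeout:  &metav1.Duration{Duration: 2 * time.Hour},
			MachineHealthTimeout: &metav1.Duration{Duration: 10 * time.Minute},
			MaxEvictRetries:      pointer.Int32Ptr(10),
			NodeConditions:       []string{"KernelDeadlock", "ReadonlyFilesystem"},
		},
	}
	fmt.Println(worker.DataVolumes[0].Name)
}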
@@ -765,6 +942,9 @@ const ( ShootSystemComponentsHealthy ConditionType = "SystemComponentsHealthy" // ShootHibernationPossible is a constant for a condition type indicating whether the Shoot can be hibernated. ShootHibernationPossible ConditionType = "HibernationPossible" + // ShootMaintenancePreconditionsSatisfied is a constant for a condition type indicating whether all preconditions + // for a shoot maintenance operation are satisfied. + ShootMaintenancePreconditionsSatisfied ConditionType = "MaintenancePreconditionsSatisfied" ) // DNSUnmanaged is a constant for the 'unmanaged' DNS provider. diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/types_shootstate.go b/vendor/github.com/gardener/gardener/pkg/apis/core/types_shootstate.go index 215fcd4cc..55e04ff5c 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/core/types_shootstate.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/types_shootstate.go @@ -15,6 +15,7 @@ package core import ( + autoscalingv1 "k8s.io/api/autoscaling/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" ) @@ -48,6 +49,8 @@ type ShootStateSpec struct { Gardener []GardenerResourceData // Extensions holds the state of custom resources reconciled by extension controllers in the seed Extensions []ExtensionResourceState + // Resources holds the data of resources referred to by extension controller states + Resources []ResourceData } // GardenerResourceData holds the data which is used to generate resources, deployed in the Shoot's control plane. @@ -70,5 +73,14 @@ type ExtensionResourceState struct { // Purpose of the extension custom resource Purpose *string // State of the extension resource - State runtime.RawExtension + State *runtime.RawExtension + // Resources holds a list of named resource references that can be referred to in the state by their names. + Resources []NamedResourceReference +} + +// ResourceData holds the data of a resource referred to by an extension controller state. +type ResourceData struct { + autoscalingv1.CrossVersionObjectReference + // Data of the resource + Data runtime.RawExtension } diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/types_utils.go b/vendor/github.com/gardener/gardener/pkg/apis/core/types_utils.go index 370ed9744..5315760d5 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/core/types_utils.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/types_utils.go @@ -16,16 +16,8 @@ package core import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" ) -// ProviderConfig is a workaround for missing OpenAPI functions on runtime.RawExtension struct. -// https://github.com/kubernetes/kubernetes/issues/55890 -// https://github.com/kubernetes-sigs/cluster-api/issues/137 -type ProviderConfig struct { - runtime.RawExtension -} - // Condition holds the information about the state of a resource. type Condition struct { // Type of the Shoot condition. @@ -40,6 +32,8 @@ type Condition struct { Reason string // A human readable message indicating details about the transition. Message string + // Well-defined error codes in case the condition reports a problem. + Codes []ErrorCode } // ConditionStatus is the status of a condition. 
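Illustrative sketch, not part of the vendored diff: with the ProviderConfig wrapper removed from types_utils.go above, provider-specific blobs are now plain *runtime.RawExtension values. A minimal Go sketch, assuming the internal core types from this file set; the provider type and the embedded JSON document are example values only.

package main

import (
	"fmt"

	"github.com/gardener/gardener/pkg/apis/core"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	// The raw JSON of the provider-specific object is carried as-is in the
	// RawExtension; no dedicated wrapper type is involved anymore.
	infraConfig := []byte(`{"apiVersion":"example.provider.extensions.gardener.cloud/v1alpha1","kind":"InfrastructureConfig"}`)

	provider := core.Provider{
		Type:                 "example",
		InfrastructureConfig: &runtime.RawExtension{Raw: infraConfig},
	}
	fmt.Println(string(provider.InfrastructureConfig.Raw))
}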
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/conversions.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/conversions.go new file mode 100644 index 000000000..48dce7b58 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/conversions.go @@ -0,0 +1,410 @@ +// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + "fmt" + "unsafe" + + "github.com/gardener/gardener/pkg/apis/core" + + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime" +) + +func addConversionFuncs(scheme *runtime.Scheme) error { + if err := scheme.AddFieldLabelConversionFunc( + SchemeGroupVersion.WithKind("ControllerInstallation"), + func(label, value string) (string, string, error) { + switch label { + case "metadata.name", core.RegistrationRefName, core.SeedRefName: + return label, value, nil + default: + return "", "", fmt.Errorf("field label not supported: %s", label) + } + }, + ); err != nil { + return err + } + + if err := scheme.AddFieldLabelConversionFunc(SchemeGroupVersion.WithKind("Shoot"), + func(label, value string) (string, string, error) { + switch label { + case "metadata.name", "metadata.namespace", core.ShootSeedName, core.ShootCloudProfileName, core.ShootStatusSeedName: + return label, value, nil + default: + return "", "", fmt.Errorf("field label not supported: %s", label) + } + }, + ); err != nil { + return err + } + + // Add non-generated conversion functions + if err := scheme.AddConversionFunc((*BackupBucket)(nil), (*core.BackupBucket)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_BackupBucket_To_core_BackupBucket(a.(*BackupBucket), b.(*core.BackupBucket), scope) + }); err != nil { + return err + } + + if err := scheme.AddConversionFunc((*BackupBucketSpec)(nil), (*core.BackupBucketSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_BackupBucketSpec_To_core_BackupBucketSpec(a.(*BackupBucketSpec), b.(*core.BackupBucketSpec), scope) + }); err != nil { + return err + } + + if err := scheme.AddConversionFunc((*BackupEntry)(nil), (*core.BackupEntry)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_BackupEntry_To_core_BackupEntry(a.(*BackupEntry), b.(*core.BackupEntry), scope) + }); err != nil { + return err + } + + if err := scheme.AddConversionFunc((*BackupEntrySpec)(nil), (*core.BackupEntrySpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_BackupEntrySpec_To_core_BackupEntrySpec(a.(*BackupEntrySpec), b.(*core.BackupEntrySpec), scope) + }); err != nil { + return err + } + + if err := scheme.AddConversionFunc((*Seed)(nil), (*core.Seed)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_v1alpha1_Seed_To_core_Seed(a.(*Seed), b.(*core.Seed), scope) + }); err != nil { + return err + } + + if err := scheme.AddConversionFunc((*SeedSpec)(nil), (*core.SeedSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_SeedSpec_To_core_SeedSpec(a.(*SeedSpec), b.(*core.SeedSpec), scope) + }); err != nil { + return err + } + + if err := scheme.AddConversionFunc((*SeedNetworks)(nil), (*core.SeedNetworks)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_SeedNetworks_To_core_SeedNetworks(a.(*SeedNetworks), b.(*core.SeedNetworks), scope) + }); err != nil { + return err + } + + if err := scheme.AddConversionFunc((*ShootStatus)(nil), (*core.ShootStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_ShootStatus_To_core_ShootStatus(a.(*ShootStatus), b.(*core.ShootStatus), scope) + }); err != nil { + return err + } + + if err := scheme.AddConversionFunc((*core.BackupBucket)(nil), (*BackupBucket)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_BackupBucket_To_v1alpha1_BackupBucket(a.(*core.BackupBucket), b.(*BackupBucket), scope) + }); err != nil { + return err + } + + if err := scheme.AddConversionFunc((*core.BackupBucketSpec)(nil), (*BackupBucketSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_BackupBucketSpec_To_v1alpha1_BackupBucketSpec(a.(*core.BackupBucketSpec), b.(*BackupBucketSpec), scope) + }); err != nil { + return err + } + + if err := scheme.AddConversionFunc((*core.BackupEntry)(nil), (*BackupEntry)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_BackupEntry_To_v1alpha1_BackupEntry(a.(*core.BackupEntry), b.(*BackupEntry), scope) + }); err != nil { + return err + } + + if err := scheme.AddConversionFunc((*core.BackupEntrySpec)(nil), (*BackupEntrySpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_BackupEntrySpec_To_v1alpha1_BackupEntrySpec(a.(*core.BackupEntrySpec), b.(*BackupEntrySpec), scope) + }); err != nil { + return err + } + + if err := scheme.AddConversionFunc((*core.Seed)(nil), (*Seed)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_Seed_To_v1alpha1_Seed(a.(*core.Seed), b.(*Seed), scope) + }); err != nil { + return err + } + + if err := scheme.AddConversionFunc((*core.SeedSpec)(nil), (*SeedSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_SeedSpec_To_v1alpha1_SeedSpec(a.(*core.SeedSpec), b.(*SeedSpec), scope) + }); err != nil { + return err + } + + if err := scheme.AddConversionFunc((*core.SeedNetworks)(nil), (*SeedNetworks)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_SeedNetworks_To_v1alpha1_SeedNetworks(a.(*core.SeedNetworks), b.(*SeedNetworks), scope) + }); err != nil { + return err + } + + if err := scheme.AddConversionFunc((*core.ShootStatus)(nil), (*ShootStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ShootStatus_To_v1alpha1_ShootStatus(a.(*core.ShootStatus), b.(*ShootStatus), scope) + }); err != nil { + return err + } + return nil +} + +func Convert_v1alpha1_BackupBucket_To_core_BackupBucket(in *BackupBucket, out *core.BackupBucket, s conversion.Scope) error { + if err := autoConvert_v1alpha1_BackupBucket_To_core_BackupBucket(in, out, s); err != nil { + return err + } + + out.Spec.SeedName = in.Spec.Seed + + return nil +} + +func 
Convert_core_BackupBucket_To_v1alpha1_BackupBucket(in *core.BackupBucket, out *BackupBucket, s conversion.Scope) error { + if err := autoConvert_core_BackupBucket_To_v1alpha1_BackupBucket(in, out, s); err != nil { + return err + } + + out.Spec.Seed = in.Spec.SeedName + + return nil +} + +func Convert_core_BackupBucketSpec_To_v1alpha1_BackupBucketSpec(in *core.BackupBucketSpec, out *BackupBucketSpec, s conversion.Scope) error { + return autoConvert_core_BackupBucketSpec_To_v1alpha1_BackupBucketSpec(in, out, s) +} + +func Convert_v1alpha1_BackupBucketSpec_To_core_BackupBucketSpec(in *BackupBucketSpec, out *core.BackupBucketSpec, s conversion.Scope) error { + return autoConvert_v1alpha1_BackupBucketSpec_To_core_BackupBucketSpec(in, out, s) +} + +func Convert_v1alpha1_BackupEntry_To_core_BackupEntry(in *BackupEntry, out *core.BackupEntry, s conversion.Scope) error { + if err := autoConvert_v1alpha1_BackupEntry_To_core_BackupEntry(in, out, s); err != nil { + return err + } + + out.Spec.SeedName = in.Spec.Seed + + return nil +} + +func Convert_core_BackupEntry_To_v1alpha1_BackupEntry(in *core.BackupEntry, out *BackupEntry, s conversion.Scope) error { + if err := autoConvert_core_BackupEntry_To_v1alpha1_BackupEntry(in, out, s); err != nil { + return err + } + + out.Spec.Seed = in.Spec.SeedName + + return nil +} + +func Convert_core_BackupEntrySpec_To_v1alpha1_BackupEntrySpec(in *core.BackupEntrySpec, out *BackupEntrySpec, s conversion.Scope) error { + return autoConvert_core_BackupEntrySpec_To_v1alpha1_BackupEntrySpec(in, out, s) +} + +func Convert_v1alpha1_BackupEntrySpec_To_core_BackupEntrySpec(in *BackupEntrySpec, out *core.BackupEntrySpec, s conversion.Scope) error { + return autoConvert_v1alpha1_BackupEntrySpec_To_core_BackupEntrySpec(in, out, s) +} + +func Convert_v1alpha1_Seed_To_core_Seed(in *Seed, out *core.Seed, s conversion.Scope) error { + if err := autoConvert_v1alpha1_Seed_To_core_Seed(in, out, s); err != nil { + return err + } + + out.Spec.Networks.BlockCIDRs = in.Spec.BlockCIDRs + + return nil +} + +func Convert_core_Seed_To_v1alpha1_Seed(in *core.Seed, out *Seed, s conversion.Scope) error { + if err := autoConvert_core_Seed_To_v1alpha1_Seed(in, out, s); err != nil { + return err + } + + out.Spec.BlockCIDRs = in.Spec.Networks.BlockCIDRs + + return nil +} + +func Convert_core_SeedSpec_To_v1alpha1_SeedSpec(in *core.SeedSpec, out *SeedSpec, s conversion.Scope) error { + return autoConvert_core_SeedSpec_To_v1alpha1_SeedSpec(in, out, s) +} + +func Convert_v1alpha1_SeedSpec_To_core_SeedSpec(in *SeedSpec, out *core.SeedSpec, s conversion.Scope) error { + return autoConvert_v1alpha1_SeedSpec_To_core_SeedSpec(in, out, s) +} + +func Convert_core_SeedNetworks_To_v1alpha1_SeedNetworks(in *core.SeedNetworks, out *SeedNetworks, s conversion.Scope) error { + return autoConvert_core_SeedNetworks_To_v1alpha1_SeedNetworks(in, out, s) +} + +func Convert_v1alpha1_SeedNetworks_To_core_SeedNetworks(in *SeedNetworks, out *core.SeedNetworks, s conversion.Scope) error { + return autoConvert_v1alpha1_SeedNetworks_To_core_SeedNetworks(in, out, s) +} + +func Convert_core_SeedStatus_To_v1alpha1_SeedStatus(in *core.SeedStatus, out *SeedStatus, s conversion.Scope) error { + return autoConvert_core_SeedStatus_To_v1alpha1_SeedStatus(in, out, s) +} + +func Convert_core_ShootStatus_To_v1alpha1_ShootStatus(in *core.ShootStatus, out *ShootStatus, s conversion.Scope) error { + if err := autoConvert_core_ShootStatus_To_v1alpha1_ShootStatus(in, out, s); err != nil { + return err + } + + if len(in.LastErrors) != 0 { + 
out.LastError = (*LastError)(unsafe.Pointer(&in.LastErrors[0])) + if len(in.LastErrors) > 1 { + lastErrors := in.LastErrors[1:] + out.LastErrors = *(*[]LastError)(unsafe.Pointer(&lastErrors)) + } else { + out.LastErrors = nil + } + } + + out.Seed = in.SeedName + + return nil +} + +func Convert_v1alpha1_ShootStatus_To_core_ShootStatus(in *ShootStatus, out *core.ShootStatus, s conversion.Scope) error { + if err := autoConvert_v1alpha1_ShootStatus_To_core_ShootStatus(in, out, s); err != nil { + return err + } + + if in.LastError != nil { + outLastErrors := []core.LastError{ + { + Description: in.LastError.Description, + Codes: *(*[]core.ErrorCode)(unsafe.Pointer(&in.LastError.Codes)), + LastUpdateTime: in.LastError.LastUpdateTime, + }, + } + out.LastErrors = append(outLastErrors, *(*[]core.LastError)(unsafe.Pointer(&in.LastErrors))...) + } else { + out.LastErrors = nil + } + + out.SeedName = in.Seed + + return nil +} + +func Convert_v1alpha1_ProjectSpec_To_core_ProjectSpec(in *ProjectSpec, out *core.ProjectSpec, s conversion.Scope) error { + if err := autoConvert_v1alpha1_ProjectSpec_To_core_ProjectSpec(in, out, s); err != nil { + return err + } + + if owner := out.Owner; owner != nil { + outer: + for i, member := range out.Members { + if member.Name == owner.Name && member.APIGroup == owner.APIGroup && member.Kind == owner.Kind { + // add owner role to the current project's owner if not present + for _, role := range member.Roles { + if role == core.ProjectMemberOwner { + continue outer + } + } + + out.Members[i].Roles = append(out.Members[i].Roles, core.ProjectMemberOwner) + } else { + // delete owner role from all other members + out.Members[i].Roles = removeRoleFromRoles(member.Roles, ProjectMemberOwner) + } + } + } + + return nil +} + +func Convert_core_ProjectSpec_To_v1alpha1_ProjectSpec(in *core.ProjectSpec, out *ProjectSpec, s conversion.Scope) error { + if err := autoConvert_core_ProjectSpec_To_v1alpha1_ProjectSpec(in, out, s); err != nil { + return err + } + + if owner := out.Owner; owner != nil { + outer: + for i, member := range out.Members { + if member.Name == owner.Name && member.APIGroup == owner.APIGroup && member.Kind == owner.Kind { + // add owner role to the current project's owner if not present + if member.Role == core.ProjectMemberOwner { + // remove it from owners list if present + out.Members[i].Roles = removeRoleFromRoles(member.Roles, ProjectMemberOwner) + continue outer + } + for _, role := range member.Roles { + if role == ProjectMemberOwner { + continue outer + } + } + + if out.Members[i].Role == "" { + out.Members[i].Role = core.ProjectMemberOwner + } else { + out.Members[i].Roles = append(out.Members[i].Roles, core.ProjectMemberOwner) + } + } else { + // delete owner role from all other members + out.Members[i].Roles = removeRoleFromRoles(member.Roles, ProjectMemberOwner) + + if member.Role == ProjectMemberOwner { + if len(out.Members[i].Roles) == 0 { + out.Members[i].Role = "" + } else { + out.Members[i].Role = out.Members[i].Roles[0] + if len(out.Members[i].Roles) > 1 { + out.Members[i].Roles = out.Members[i].Roles[1:] + } else { + out.Members[i].Roles = nil + } + } + } + } + } + } + + return nil +} + +func Convert_v1alpha1_ProjectMember_To_core_ProjectMember(in *ProjectMember, out *core.ProjectMember, s conversion.Scope) error { + if err := autoConvert_v1alpha1_ProjectMember_To_core_ProjectMember(in, out, s); err != nil { + return err + } + + if len(in.Role) == 0 { + return nil + } + + // delete in.Role from out.Roles to make sure it gets added to the head + 
if len(out.Roles) > 0 { + out.Roles = removeRoleFromRoles(out.Roles, in.Role) + } + + // add in.Role to the head of out.Roles + out.Roles = append([]string{in.Role}, out.Roles...) + + return nil +} + +func Convert_core_ProjectMember_To_v1alpha1_ProjectMember(in *core.ProjectMember, out *ProjectMember, s conversion.Scope) error { + if err := autoConvert_core_ProjectMember_To_v1alpha1_ProjectMember(in, out, s); err != nil { + return err + } + + if len(in.Roles) > 0 { + out.Role = in.Roles[0] + out.Roles = in.Roles[1:] + } + + return nil +} + +func removeRoleFromRoles(roles []string, role string) []string { + var newRoles []string + for _, r := range roles { + if r != role { + newRoles = append(newRoles, r) + } + } + return newRoles +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/defaults.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/defaults.go new file mode 100644 index 000000000..349bb6a9a --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/defaults.go @@ -0,0 +1,361 @@ +// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + "math" + "time" + + v1beta1constants "github.com/gardener/gardener/pkg/apis/core/v1beta1/constants" + "github.com/gardener/gardener/pkg/utils" + versionutils "github.com/gardener/gardener/pkg/utils/version" + + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/utils/pointer" +) + +func addDefaultingFuncs(scheme *runtime.Scheme) error { + return RegisterDefaults(scheme) +} + +// SetDefaults_SecretBinding sets default values for SecretBinding objects. +func SetDefaults_SecretBinding(obj *SecretBinding) { + if len(obj.SecretRef.Namespace) == 0 { + obj.SecretRef.Namespace = obj.Namespace + } + + for i, quota := range obj.Quotas { + if len(quota.Namespace) == 0 { + obj.Quotas[i].Namespace = obj.Namespace + } + } +} + +// SetDefaults_Project sets default values for Project objects. 
+func SetDefaults_Project(obj *Project) { + defaultSubject(obj.Spec.Owner) + + for i, member := range obj.Spec.Members { + defaultSubject(&obj.Spec.Members[i].Subject) + + if len(member.Role) == 0 && len(member.Roles) == 0 { + obj.Spec.Members[i].Role = ProjectMemberViewer + } + } + + if obj.Spec.Namespace != nil && *obj.Spec.Namespace == v1beta1constants.GardenNamespace { + if obj.Spec.Tolerations == nil { + obj.Spec.Tolerations = &ProjectTolerations{} + } + addTolerations(&obj.Spec.Tolerations.Whitelist, Toleration{Key: SeedTaintProtected}) + addTolerations(&obj.Spec.Tolerations.Defaults, Toleration{Key: SeedTaintProtected}) + } +} + +func defaultSubject(obj *rbacv1.Subject) { + if obj != nil && len(obj.APIGroup) == 0 { + switch obj.Kind { + case rbacv1.ServiceAccountKind: + obj.APIGroup = "" + case rbacv1.UserKind: + obj.APIGroup = rbacv1.GroupName + case rbacv1.GroupKind: + obj.APIGroup = rbacv1.GroupName + } + } +} + +// SetDefaults_MachineType sets default values for MachineType objects. +func SetDefaults_MachineType(obj *MachineType) { + if obj.Usable == nil { + trueVar := true + obj.Usable = &trueVar + } +} + +// SetDefaults_VolumeType sets default values for VolumeType objects. +func SetDefaults_VolumeType(obj *VolumeType) { + if obj.Usable == nil { + trueVar := true + obj.Usable = &trueVar + } +} + +// SetDefaults_Seed sets default values for Seed objects. +func SetDefaults_Seed(obj *Seed) { + if obj.Spec.Settings == nil { + obj.Spec.Settings = &SeedSettings{} + } + + if obj.Spec.Settings.ExcessCapacityReservation == nil { + obj.Spec.Settings.ExcessCapacityReservation = &SeedSettingExcessCapacityReservation{Enabled: true} + } + + if obj.Spec.Settings.Scheduling == nil { + obj.Spec.Settings.Scheduling = &SeedSettingScheduling{Visible: true} + } + + if obj.Spec.Settings.ShootDNS == nil { + obj.Spec.Settings.ShootDNS = &SeedSettingShootDNS{Enabled: true} + } + + if obj.Spec.Settings.VerticalPodAutoscaler == nil { + obj.Spec.Settings.VerticalPodAutoscaler = &SeedSettingVerticalPodAutoscaler{Enabled: true} + } +} + +// SetDefaults_Shoot sets default values for Shoot objects. +func SetDefaults_Shoot(obj *Shoot) { + k8sVersionLessThan116, _ := versionutils.CompareVersions(obj.Spec.Kubernetes.Version, "<", "1.16") + // Error is ignored here because we cannot do anything meaningful with it. + // k8sVersionLessThan116 will default to `false`. 
+ + if obj.Spec.Kubernetes.AllowPrivilegedContainers == nil { + obj.Spec.Kubernetes.AllowPrivilegedContainers = pointer.BoolPtr(true) + } + + if obj.Spec.Kubernetes.KubeAPIServer == nil { + obj.Spec.Kubernetes.KubeAPIServer = &KubeAPIServerConfig{} + } + if obj.Spec.Kubernetes.KubeAPIServer.EnableBasicAuthentication == nil { + if k8sVersionLessThan116 { + obj.Spec.Kubernetes.KubeAPIServer.EnableBasicAuthentication = pointer.BoolPtr(true) + } else { + obj.Spec.Kubernetes.KubeAPIServer.EnableBasicAuthentication = pointer.BoolPtr(false) + } + } + if obj.Spec.Kubernetes.KubeAPIServer.Requests == nil { + obj.Spec.Kubernetes.KubeAPIServer.Requests = &KubeAPIServerRequests{} + } + if obj.Spec.Kubernetes.KubeAPIServer.Requests.MaxNonMutatingInflight == nil { + obj.Spec.Kubernetes.KubeAPIServer.Requests.MaxNonMutatingInflight = pointer.Int32Ptr(400) + } + if obj.Spec.Kubernetes.KubeAPIServer.Requests.MaxMutatingInflight == nil { + obj.Spec.Kubernetes.KubeAPIServer.Requests.MaxMutatingInflight = pointer.Int32Ptr(200) + } + + if obj.Spec.Kubernetes.KubeControllerManager == nil { + obj.Spec.Kubernetes.KubeControllerManager = &KubeControllerManagerConfig{} + } + if obj.Spec.Kubernetes.KubeControllerManager.NodeCIDRMaskSize == nil { + obj.Spec.Kubernetes.KubeControllerManager.NodeCIDRMaskSize = calculateDefaultNodeCIDRMaskSize(obj.Spec.Kubernetes.Kubelet, obj.Spec.Provider.Workers) + } + if obj.Spec.Kubernetes.KubeControllerManager.PodEvictionTimeout == nil { + obj.Spec.Kubernetes.KubeControllerManager.PodEvictionTimeout = &metav1.Duration{Duration: 2 * time.Minute} + } + + if obj.Spec.Kubernetes.KubeProxy == nil { + obj.Spec.Kubernetes.KubeProxy = &KubeProxyConfig{} + } + if obj.Spec.Kubernetes.KubeProxy.Mode == nil { + defaultProxyMode := ProxyModeIPTables + obj.Spec.Kubernetes.KubeProxy.Mode = &defaultProxyMode + } + + if obj.Spec.Addons == nil { + obj.Spec.Addons = &Addons{} + } + if obj.Spec.Addons.KubernetesDashboard == nil { + obj.Spec.Addons.KubernetesDashboard = &KubernetesDashboard{} + } + if obj.Spec.Addons.KubernetesDashboard.AuthenticationMode == nil { + var defaultAuthMode string + if *obj.Spec.Kubernetes.KubeAPIServer.EnableBasicAuthentication { + defaultAuthMode = KubernetesDashboardAuthModeBasic + } else { + defaultAuthMode = KubernetesDashboardAuthModeToken + } + obj.Spec.Addons.KubernetesDashboard.AuthenticationMode = &defaultAuthMode + } + + if obj.Spec.Purpose == nil { + p := ShootPurposeEvaluation + obj.Spec.Purpose = &p + } + + // In previous Gardener versions that weren't supporting tolerations, it was hard-coded to (only) allow shoots in the + // `garden` namespace to use seeds that had the 'protected' taint. In order to be backwards compatible, now with the + // introduction of tolerations, we add the 'protected' toleration to the garden namespace by default. 
+ if obj.Namespace == v1beta1constants.GardenNamespace { + addTolerations(&obj.Spec.Tolerations, Toleration{Key: SeedTaintProtected}) + } + + if obj.Spec.Kubernetes.Kubelet == nil { + obj.Spec.Kubernetes.Kubelet = &KubeletConfig{} + } + if obj.Spec.Kubernetes.Kubelet.FailSwapOn == nil { + obj.Spec.Kubernetes.Kubelet.FailSwapOn = pointer.BoolPtr(true) + } + + var ( + kubeReservedMemory = resource.MustParse("1Gi") + kubeReservedCPU = resource.MustParse("80m") + kubeReservedPID = resource.MustParse("20k") + + k8sVersionGreaterEqual115, _ = versionutils.CompareVersions(obj.Spec.Kubernetes.Version, ">=", "1.15") + ) + + if obj.Spec.Kubernetes.Kubelet.KubeReserved == nil { + obj.Spec.Kubernetes.Kubelet.KubeReserved = &KubeletConfigReserved{Memory: &kubeReservedMemory, CPU: &kubeReservedCPU} + + if k8sVersionGreaterEqual115 { + obj.Spec.Kubernetes.Kubelet.KubeReserved.PID = &kubeReservedPID + } + } else { + if obj.Spec.Kubernetes.Kubelet.KubeReserved.Memory == nil { + obj.Spec.Kubernetes.Kubelet.KubeReserved.Memory = &kubeReservedMemory + } + if obj.Spec.Kubernetes.Kubelet.KubeReserved.CPU == nil { + obj.Spec.Kubernetes.Kubelet.KubeReserved.CPU = &kubeReservedCPU + } + if obj.Spec.Kubernetes.Kubelet.KubeReserved.PID == nil && k8sVersionGreaterEqual115 { + obj.Spec.Kubernetes.Kubelet.KubeReserved.PID = &kubeReservedPID + } + } + + if obj.Spec.Maintenance == nil { + obj.Spec.Maintenance = &Maintenance{} + } +} + +// SetDefaults_Maintenance sets default values for Maintenance objects. +func SetDefaults_Maintenance(obj *Maintenance) { + if obj.AutoUpdate == nil { + obj.AutoUpdate = &MaintenanceAutoUpdate{ + KubernetesVersion: true, + MachineImageVersion: true, + } + } + + if obj.TimeWindow == nil { + mt := utils.RandomMaintenanceTimeWindow() + obj.TimeWindow = &MaintenanceTimeWindow{ + Begin: mt.Begin().Formatted(), + End: mt.End().Formatted(), + } + } +} + +// SetDefaults_VerticalPodAutoscaler sets default values for VerticalPodAutoscaler objects. +func SetDefaults_VerticalPodAutoscaler(obj *VerticalPodAutoscaler) { + if obj.EvictAfterOOMThreshold == nil { + v := DefaultEvictAfterOOMThreshold + obj.EvictAfterOOMThreshold = &v + } + if obj.EvictionRateBurst == nil { + v := DefaultEvictionRateBurst + obj.EvictionRateBurst = &v + } + if obj.EvictionRateLimit == nil { + v := DefaultEvictionRateLimit + obj.EvictionRateLimit = &v + } + if obj.EvictionTolerance == nil { + v := DefaultEvictionTolerance + obj.EvictionTolerance = &v + } + if obj.RecommendationMarginFraction == nil { + v := DefaultRecommendationMarginFraction + obj.RecommendationMarginFraction = &v + } + if obj.UpdaterInterval == nil { + v := DefaultUpdaterInterval + obj.UpdaterInterval = &v + } + if obj.RecommenderInterval == nil { + v := DefaultRecommenderInterval + obj.RecommenderInterval = &v + } +} + +// SetDefaults_Worker sets default values for Worker objects. +func SetDefaults_Worker(obj *Worker) { + if obj.MaxSurge == nil { + obj.MaxSurge = &DefaultWorkerMaxSurge + } + if obj.MaxUnavailable == nil { + obj.MaxUnavailable = &DefaultWorkerMaxUnavailable + } + if obj.SystemComponents == nil { + obj.SystemComponents = &WorkerSystemComponents{ + Allow: DefaultWorkerSystemComponentsAllow, + } + } +} + +// SetDefaults_NginxIngress sets default values for NginxIngress objects. 
+func SetDefaults_NginxIngress(obj *NginxIngress) { + if obj.ExternalTrafficPolicy == nil { + v := corev1.ServiceExternalTrafficPolicyTypeCluster + obj.ExternalTrafficPolicy = &v + } +} + +// SetDefaults_ControllerResource sets default values for ControllerResource objects. +func SetDefaults_ControllerResource(obj *ControllerResource) { + if obj.Primary == nil { + obj.Primary = pointer.BoolPtr(true) + } +} + +// SetDefaults_ControllerDeployment sets default values for ControllerDeployment objects. +func SetDefaults_ControllerDeployment(obj *ControllerDeployment) { + p := ControllerDeploymentPolicyOnDemand + if obj.Policy == nil { + obj.Policy = &p + } +} + +// Helper functions + +func calculateDefaultNodeCIDRMaskSize(kubelet *KubeletConfig, workers []Worker) *int32 { + var maxPods int32 = 110 // default maxPods setting on kubelet + + if kubelet != nil && kubelet.MaxPods != nil { + maxPods = *kubelet.MaxPods + } + + for _, worker := range workers { + if worker.Kubernetes != nil && worker.Kubernetes.Kubelet != nil && worker.Kubernetes.Kubelet.MaxPods != nil && *worker.Kubernetes.Kubelet.MaxPods > maxPods { + maxPods = *worker.Kubernetes.Kubelet.MaxPods + } + } + + // by having approximately twice as many available IP addresses as possible Pods, Kubernetes is able to mitigate IP address reuse as Pods are added to and removed from a node. + nodeCidrRange := int32(32 - int(math.Ceil(math.Log2(float64(maxPods*2))))) + return &nodeCidrRange +} + +func addTolerations(tolerations *[]Toleration, additionalTolerations ...Toleration) { + existingTolerations := sets.NewString() + for _, toleration := range *tolerations { + existingTolerations.Insert(utils.IDForKeyWithOptionalValue(toleration.Key, toleration.Value)) + } + + for _, toleration := range additionalTolerations { + if existingTolerations.Has(toleration.Key) { + continue + } + if existingTolerations.Has(utils.IDForKeyWithOptionalValue(toleration.Key, toleration.Value)) { + continue + } + *tolerations = append(*tolerations, toleration) + } +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/doc.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/doc.go new file mode 100644 index 000000000..ac2cfd19b --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/doc.go @@ -0,0 +1,24 @@ +// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package v1alpha1 is the v1alpha1 version of the API. +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=github.com/gardener/gardener/pkg/apis/core +// +k8s:openapi-gen=true +// +k8s:defaulter-gen=TypeMeta +// +k8s:protobuf-gen=package + +// Package v1alpha1 is a version of the API. 
+// +groupName=core.gardener.cloud +package v1alpha1 diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/generated.pb.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/generated.pb.go new file mode 100644 index 000000000..5ddc5f21b --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/generated.pb.go @@ -0,0 +1,40856 @@ +/* +Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/gardener/gardener/pkg/apis/core/v1alpha1/generated.proto + +package v1alpha1 + +import ( + encoding_binary "encoding/binary" + fmt "fmt" + + v1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" + v13 "k8s.io/api/rbac/v1" + resource "k8s.io/apimachinery/pkg/api/resource" + v11 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" + + k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" + intstr "k8s.io/apimachinery/pkg/util/intstr" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *Addon) Reset() { *m = Addon{} } +func (*Addon) ProtoMessage() {} +func (*Addon) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{0} +} +func (m *Addon) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Addon) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Addon) XXX_Merge(src proto.Message) { + xxx_messageInfo_Addon.Merge(m, src) +} +func (m *Addon) XXX_Size() int { + return m.Size() +} +func (m *Addon) XXX_DiscardUnknown() { + xxx_messageInfo_Addon.DiscardUnknown(m) +} + +var xxx_messageInfo_Addon proto.InternalMessageInfo + +func (m *Addons) Reset() { *m = Addons{} } +func (*Addons) ProtoMessage() {} +func (*Addons) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{1} +} +func (m *Addons) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Addons) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Addons) XXX_Merge(src proto.Message) { + xxx_messageInfo_Addons.Merge(m, src) +} +func (m *Addons) XXX_Size() int { + return m.Size() +} +func (m *Addons) XXX_DiscardUnknown() { + xxx_messageInfo_Addons.DiscardUnknown(m) +} + +var xxx_messageInfo_Addons proto.InternalMessageInfo + +func (m *AdmissionPlugin) Reset() { *m = AdmissionPlugin{} } +func (*AdmissionPlugin) ProtoMessage() {} +func (*AdmissionPlugin) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{2} +} +func (m *AdmissionPlugin) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AdmissionPlugin) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AdmissionPlugin) XXX_Merge(src proto.Message) { + xxx_messageInfo_AdmissionPlugin.Merge(m, src) +} +func (m *AdmissionPlugin) XXX_Size() int { + return m.Size() +} +func (m *AdmissionPlugin) XXX_DiscardUnknown() { + xxx_messageInfo_AdmissionPlugin.DiscardUnknown(m) +} + +var xxx_messageInfo_AdmissionPlugin proto.InternalMessageInfo + +func (m *Alerting) Reset() { *m = Alerting{} } +func (*Alerting) ProtoMessage() {} +func (*Alerting) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{3} +} +func (m *Alerting) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Alerting) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Alerting) XXX_Merge(src proto.Message) { + xxx_messageInfo_Alerting.Merge(m, src) +} +func (m *Alerting) XXX_Size() int { + return m.Size() +} +func (m *Alerting) XXX_DiscardUnknown() { + xxx_messageInfo_Alerting.DiscardUnknown(m) +} + +var xxx_messageInfo_Alerting proto.InternalMessageInfo + +func (m *AuditConfig) Reset() { *m = AuditConfig{} } +func (*AuditConfig) ProtoMessage() {} +func (*AuditConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{4} +} +func (m *AuditConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AuditConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = 
b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AuditConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_AuditConfig.Merge(m, src) +} +func (m *AuditConfig) XXX_Size() int { + return m.Size() +} +func (m *AuditConfig) XXX_DiscardUnknown() { + xxx_messageInfo_AuditConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_AuditConfig proto.InternalMessageInfo + +func (m *AuditPolicy) Reset() { *m = AuditPolicy{} } +func (*AuditPolicy) ProtoMessage() {} +func (*AuditPolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{5} +} +func (m *AuditPolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AuditPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AuditPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_AuditPolicy.Merge(m, src) +} +func (m *AuditPolicy) XXX_Size() int { + return m.Size() +} +func (m *AuditPolicy) XXX_DiscardUnknown() { + xxx_messageInfo_AuditPolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_AuditPolicy proto.InternalMessageInfo + +func (m *AvailabilityZone) Reset() { *m = AvailabilityZone{} } +func (*AvailabilityZone) ProtoMessage() {} +func (*AvailabilityZone) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{6} +} +func (m *AvailabilityZone) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AvailabilityZone) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AvailabilityZone) XXX_Merge(src proto.Message) { + xxx_messageInfo_AvailabilityZone.Merge(m, src) +} +func (m *AvailabilityZone) XXX_Size() int { + return m.Size() +} +func (m *AvailabilityZone) XXX_DiscardUnknown() { + xxx_messageInfo_AvailabilityZone.DiscardUnknown(m) +} + +var xxx_messageInfo_AvailabilityZone proto.InternalMessageInfo + +func (m *BackupBucket) Reset() { *m = BackupBucket{} } +func (*BackupBucket) ProtoMessage() {} +func (*BackupBucket) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{7} +} +func (m *BackupBucket) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BackupBucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BackupBucket) XXX_Merge(src proto.Message) { + xxx_messageInfo_BackupBucket.Merge(m, src) +} +func (m *BackupBucket) XXX_Size() int { + return m.Size() +} +func (m *BackupBucket) XXX_DiscardUnknown() { + xxx_messageInfo_BackupBucket.DiscardUnknown(m) +} + +var xxx_messageInfo_BackupBucket proto.InternalMessageInfo + +func (m *BackupBucketList) Reset() { *m = BackupBucketList{} } +func (*BackupBucketList) ProtoMessage() {} +func (*BackupBucketList) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{8} +} +func (m *BackupBucketList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BackupBucketList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BackupBucketList) XXX_Merge(src proto.Message) { + xxx_messageInfo_BackupBucketList.Merge(m, src) +} +func (m 
*BackupBucketList) XXX_Size() int { + return m.Size() +} +func (m *BackupBucketList) XXX_DiscardUnknown() { + xxx_messageInfo_BackupBucketList.DiscardUnknown(m) +} + +var xxx_messageInfo_BackupBucketList proto.InternalMessageInfo + +func (m *BackupBucketProvider) Reset() { *m = BackupBucketProvider{} } +func (*BackupBucketProvider) ProtoMessage() {} +func (*BackupBucketProvider) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{9} +} +func (m *BackupBucketProvider) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BackupBucketProvider) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BackupBucketProvider) XXX_Merge(src proto.Message) { + xxx_messageInfo_BackupBucketProvider.Merge(m, src) +} +func (m *BackupBucketProvider) XXX_Size() int { + return m.Size() +} +func (m *BackupBucketProvider) XXX_DiscardUnknown() { + xxx_messageInfo_BackupBucketProvider.DiscardUnknown(m) +} + +var xxx_messageInfo_BackupBucketProvider proto.InternalMessageInfo + +func (m *BackupBucketSpec) Reset() { *m = BackupBucketSpec{} } +func (*BackupBucketSpec) ProtoMessage() {} +func (*BackupBucketSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{10} +} +func (m *BackupBucketSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BackupBucketSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BackupBucketSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_BackupBucketSpec.Merge(m, src) +} +func (m *BackupBucketSpec) XXX_Size() int { + return m.Size() +} +func (m *BackupBucketSpec) XXX_DiscardUnknown() { + xxx_messageInfo_BackupBucketSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_BackupBucketSpec proto.InternalMessageInfo + +func (m *BackupBucketStatus) Reset() { *m = BackupBucketStatus{} } +func (*BackupBucketStatus) ProtoMessage() {} +func (*BackupBucketStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{11} +} +func (m *BackupBucketStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BackupBucketStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BackupBucketStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_BackupBucketStatus.Merge(m, src) +} +func (m *BackupBucketStatus) XXX_Size() int { + return m.Size() +} +func (m *BackupBucketStatus) XXX_DiscardUnknown() { + xxx_messageInfo_BackupBucketStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_BackupBucketStatus proto.InternalMessageInfo + +func (m *BackupEntry) Reset() { *m = BackupEntry{} } +func (*BackupEntry) ProtoMessage() {} +func (*BackupEntry) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{12} +} +func (m *BackupEntry) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BackupEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BackupEntry) XXX_Merge(src proto.Message) { + xxx_messageInfo_BackupEntry.Merge(m, src) +} +func (m *BackupEntry) XXX_Size() int { + return m.Size() +} +func (m 
*BackupEntry) XXX_DiscardUnknown() { + xxx_messageInfo_BackupEntry.DiscardUnknown(m) +} + +var xxx_messageInfo_BackupEntry proto.InternalMessageInfo + +func (m *BackupEntryList) Reset() { *m = BackupEntryList{} } +func (*BackupEntryList) ProtoMessage() {} +func (*BackupEntryList) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{13} +} +func (m *BackupEntryList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BackupEntryList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BackupEntryList) XXX_Merge(src proto.Message) { + xxx_messageInfo_BackupEntryList.Merge(m, src) +} +func (m *BackupEntryList) XXX_Size() int { + return m.Size() +} +func (m *BackupEntryList) XXX_DiscardUnknown() { + xxx_messageInfo_BackupEntryList.DiscardUnknown(m) +} + +var xxx_messageInfo_BackupEntryList proto.InternalMessageInfo + +func (m *BackupEntrySpec) Reset() { *m = BackupEntrySpec{} } +func (*BackupEntrySpec) ProtoMessage() {} +func (*BackupEntrySpec) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{14} +} +func (m *BackupEntrySpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BackupEntrySpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BackupEntrySpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_BackupEntrySpec.Merge(m, src) +} +func (m *BackupEntrySpec) XXX_Size() int { + return m.Size() +} +func (m *BackupEntrySpec) XXX_DiscardUnknown() { + xxx_messageInfo_BackupEntrySpec.DiscardUnknown(m) +} + +var xxx_messageInfo_BackupEntrySpec proto.InternalMessageInfo + +func (m *BackupEntryStatus) Reset() { *m = BackupEntryStatus{} } +func (*BackupEntryStatus) ProtoMessage() {} +func (*BackupEntryStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{15} +} +func (m *BackupEntryStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BackupEntryStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BackupEntryStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_BackupEntryStatus.Merge(m, src) +} +func (m *BackupEntryStatus) XXX_Size() int { + return m.Size() +} +func (m *BackupEntryStatus) XXX_DiscardUnknown() { + xxx_messageInfo_BackupEntryStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_BackupEntryStatus proto.InternalMessageInfo + +func (m *CRI) Reset() { *m = CRI{} } +func (*CRI) ProtoMessage() {} +func (*CRI) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{16} +} +func (m *CRI) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CRI) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CRI) XXX_Merge(src proto.Message) { + xxx_messageInfo_CRI.Merge(m, src) +} +func (m *CRI) XXX_Size() int { + return m.Size() +} +func (m *CRI) XXX_DiscardUnknown() { + xxx_messageInfo_CRI.DiscardUnknown(m) +} + +var xxx_messageInfo_CRI proto.InternalMessageInfo + +func (m *CloudInfo) Reset() { *m = CloudInfo{} } +func (*CloudInfo) ProtoMessage() {} +func (*CloudInfo) 
Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{17} +} +func (m *CloudInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CloudInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CloudInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_CloudInfo.Merge(m, src) +} +func (m *CloudInfo) XXX_Size() int { + return m.Size() +} +func (m *CloudInfo) XXX_DiscardUnknown() { + xxx_messageInfo_CloudInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_CloudInfo proto.InternalMessageInfo + +func (m *CloudProfile) Reset() { *m = CloudProfile{} } +func (*CloudProfile) ProtoMessage() {} +func (*CloudProfile) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{18} +} +func (m *CloudProfile) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CloudProfile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CloudProfile) XXX_Merge(src proto.Message) { + xxx_messageInfo_CloudProfile.Merge(m, src) +} +func (m *CloudProfile) XXX_Size() int { + return m.Size() +} +func (m *CloudProfile) XXX_DiscardUnknown() { + xxx_messageInfo_CloudProfile.DiscardUnknown(m) +} + +var xxx_messageInfo_CloudProfile proto.InternalMessageInfo + +func (m *CloudProfileList) Reset() { *m = CloudProfileList{} } +func (*CloudProfileList) ProtoMessage() {} +func (*CloudProfileList) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{19} +} +func (m *CloudProfileList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CloudProfileList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CloudProfileList) XXX_Merge(src proto.Message) { + xxx_messageInfo_CloudProfileList.Merge(m, src) +} +func (m *CloudProfileList) XXX_Size() int { + return m.Size() +} +func (m *CloudProfileList) XXX_DiscardUnknown() { + xxx_messageInfo_CloudProfileList.DiscardUnknown(m) +} + +var xxx_messageInfo_CloudProfileList proto.InternalMessageInfo + +func (m *CloudProfileSpec) Reset() { *m = CloudProfileSpec{} } +func (*CloudProfileSpec) ProtoMessage() {} +func (*CloudProfileSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{20} +} +func (m *CloudProfileSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CloudProfileSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CloudProfileSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_CloudProfileSpec.Merge(m, src) +} +func (m *CloudProfileSpec) XXX_Size() int { + return m.Size() +} +func (m *CloudProfileSpec) XXX_DiscardUnknown() { + xxx_messageInfo_CloudProfileSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_CloudProfileSpec proto.InternalMessageInfo + +func (m *ClusterAutoscaler) Reset() { *m = ClusterAutoscaler{} } +func (*ClusterAutoscaler) ProtoMessage() {} +func (*ClusterAutoscaler) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{21} +} +func (m *ClusterAutoscaler) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m 
*ClusterAutoscaler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterAutoscaler) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterAutoscaler.Merge(m, src) +} +func (m *ClusterAutoscaler) XXX_Size() int { + return m.Size() +} +func (m *ClusterAutoscaler) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterAutoscaler.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterAutoscaler proto.InternalMessageInfo + +func (m *ClusterInfo) Reset() { *m = ClusterInfo{} } +func (*ClusterInfo) ProtoMessage() {} +func (*ClusterInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{22} +} +func (m *ClusterInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterInfo.Merge(m, src) +} +func (m *ClusterInfo) XXX_Size() int { + return m.Size() +} +func (m *ClusterInfo) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterInfo proto.InternalMessageInfo + +func (m *Condition) Reset() { *m = Condition{} } +func (*Condition) ProtoMessage() {} +func (*Condition) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{23} +} +func (m *Condition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Condition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Condition) XXX_Merge(src proto.Message) { + xxx_messageInfo_Condition.Merge(m, src) +} +func (m *Condition) XXX_Size() int { + return m.Size() +} +func (m *Condition) XXX_DiscardUnknown() { + xxx_messageInfo_Condition.DiscardUnknown(m) +} + +var xxx_messageInfo_Condition proto.InternalMessageInfo + +func (m *ContainerRuntime) Reset() { *m = ContainerRuntime{} } +func (*ContainerRuntime) ProtoMessage() {} +func (*ContainerRuntime) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{24} +} +func (m *ContainerRuntime) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerRuntime) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContainerRuntime) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerRuntime.Merge(m, src) +} +func (m *ContainerRuntime) XXX_Size() int { + return m.Size() +} +func (m *ContainerRuntime) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerRuntime.DiscardUnknown(m) +} + +var xxx_messageInfo_ContainerRuntime proto.InternalMessageInfo + +func (m *ControllerDeployment) Reset() { *m = ControllerDeployment{} } +func (*ControllerDeployment) ProtoMessage() {} +func (*ControllerDeployment) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{25} +} +func (m *ControllerDeployment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ControllerDeployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m 
*ControllerDeployment) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerDeployment.Merge(m, src) +} +func (m *ControllerDeployment) XXX_Size() int { + return m.Size() +} +func (m *ControllerDeployment) XXX_DiscardUnknown() { + xxx_messageInfo_ControllerDeployment.DiscardUnknown(m) +} + +var xxx_messageInfo_ControllerDeployment proto.InternalMessageInfo + +func (m *ControllerInstallation) Reset() { *m = ControllerInstallation{} } +func (*ControllerInstallation) ProtoMessage() {} +func (*ControllerInstallation) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{26} +} +func (m *ControllerInstallation) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ControllerInstallation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ControllerInstallation) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerInstallation.Merge(m, src) +} +func (m *ControllerInstallation) XXX_Size() int { + return m.Size() +} +func (m *ControllerInstallation) XXX_DiscardUnknown() { + xxx_messageInfo_ControllerInstallation.DiscardUnknown(m) +} + +var xxx_messageInfo_ControllerInstallation proto.InternalMessageInfo + +func (m *ControllerInstallationList) Reset() { *m = ControllerInstallationList{} } +func (*ControllerInstallationList) ProtoMessage() {} +func (*ControllerInstallationList) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{27} +} +func (m *ControllerInstallationList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ControllerInstallationList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ControllerInstallationList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerInstallationList.Merge(m, src) +} +func (m *ControllerInstallationList) XXX_Size() int { + return m.Size() +} +func (m *ControllerInstallationList) XXX_DiscardUnknown() { + xxx_messageInfo_ControllerInstallationList.DiscardUnknown(m) +} + +var xxx_messageInfo_ControllerInstallationList proto.InternalMessageInfo + +func (m *ControllerInstallationSpec) Reset() { *m = ControllerInstallationSpec{} } +func (*ControllerInstallationSpec) ProtoMessage() {} +func (*ControllerInstallationSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{28} +} +func (m *ControllerInstallationSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ControllerInstallationSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ControllerInstallationSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerInstallationSpec.Merge(m, src) +} +func (m *ControllerInstallationSpec) XXX_Size() int { + return m.Size() +} +func (m *ControllerInstallationSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ControllerInstallationSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ControllerInstallationSpec proto.InternalMessageInfo + +func (m *ControllerInstallationStatus) Reset() { *m = ControllerInstallationStatus{} } +func (*ControllerInstallationStatus) ProtoMessage() {} +func (*ControllerInstallationStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{29} +} +func (m 
*ControllerInstallationStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ControllerInstallationStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ControllerInstallationStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerInstallationStatus.Merge(m, src) +} +func (m *ControllerInstallationStatus) XXX_Size() int { + return m.Size() +} +func (m *ControllerInstallationStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ControllerInstallationStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ControllerInstallationStatus proto.InternalMessageInfo + +func (m *ControllerRegistration) Reset() { *m = ControllerRegistration{} } +func (*ControllerRegistration) ProtoMessage() {} +func (*ControllerRegistration) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{30} +} +func (m *ControllerRegistration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ControllerRegistration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ControllerRegistration) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerRegistration.Merge(m, src) +} +func (m *ControllerRegistration) XXX_Size() int { + return m.Size() +} +func (m *ControllerRegistration) XXX_DiscardUnknown() { + xxx_messageInfo_ControllerRegistration.DiscardUnknown(m) +} + +var xxx_messageInfo_ControllerRegistration proto.InternalMessageInfo + +func (m *ControllerRegistrationList) Reset() { *m = ControllerRegistrationList{} } +func (*ControllerRegistrationList) ProtoMessage() {} +func (*ControllerRegistrationList) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{31} +} +func (m *ControllerRegistrationList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ControllerRegistrationList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ControllerRegistrationList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerRegistrationList.Merge(m, src) +} +func (m *ControllerRegistrationList) XXX_Size() int { + return m.Size() +} +func (m *ControllerRegistrationList) XXX_DiscardUnknown() { + xxx_messageInfo_ControllerRegistrationList.DiscardUnknown(m) +} + +var xxx_messageInfo_ControllerRegistrationList proto.InternalMessageInfo + +func (m *ControllerRegistrationSpec) Reset() { *m = ControllerRegistrationSpec{} } +func (*ControllerRegistrationSpec) ProtoMessage() {} +func (*ControllerRegistrationSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{32} +} +func (m *ControllerRegistrationSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ControllerRegistrationSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ControllerRegistrationSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerRegistrationSpec.Merge(m, src) +} +func (m *ControllerRegistrationSpec) XXX_Size() int { + return m.Size() +} +func (m *ControllerRegistrationSpec) XXX_DiscardUnknown() { + 
xxx_messageInfo_ControllerRegistrationSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ControllerRegistrationSpec proto.InternalMessageInfo + +func (m *ControllerResource) Reset() { *m = ControllerResource{} } +func (*ControllerResource) ProtoMessage() {} +func (*ControllerResource) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{33} +} +func (m *ControllerResource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ControllerResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ControllerResource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerResource.Merge(m, src) +} +func (m *ControllerResource) XXX_Size() int { + return m.Size() +} +func (m *ControllerResource) XXX_DiscardUnknown() { + xxx_messageInfo_ControllerResource.DiscardUnknown(m) +} + +var xxx_messageInfo_ControllerResource proto.InternalMessageInfo + +func (m *DNS) Reset() { *m = DNS{} } +func (*DNS) ProtoMessage() {} +func (*DNS) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{34} +} +func (m *DNS) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DNS) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DNS) XXX_Merge(src proto.Message) { + xxx_messageInfo_DNS.Merge(m, src) +} +func (m *DNS) XXX_Size() int { + return m.Size() +} +func (m *DNS) XXX_DiscardUnknown() { + xxx_messageInfo_DNS.DiscardUnknown(m) +} + +var xxx_messageInfo_DNS proto.InternalMessageInfo + +func (m *DNSIncludeExclude) Reset() { *m = DNSIncludeExclude{} } +func (*DNSIncludeExclude) ProtoMessage() {} +func (*DNSIncludeExclude) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{35} +} +func (m *DNSIncludeExclude) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DNSIncludeExclude) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DNSIncludeExclude) XXX_Merge(src proto.Message) { + xxx_messageInfo_DNSIncludeExclude.Merge(m, src) +} +func (m *DNSIncludeExclude) XXX_Size() int { + return m.Size() +} +func (m *DNSIncludeExclude) XXX_DiscardUnknown() { + xxx_messageInfo_DNSIncludeExclude.DiscardUnknown(m) +} + +var xxx_messageInfo_DNSIncludeExclude proto.InternalMessageInfo + +func (m *DNSProvider) Reset() { *m = DNSProvider{} } +func (*DNSProvider) ProtoMessage() {} +func (*DNSProvider) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{36} +} +func (m *DNSProvider) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DNSProvider) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DNSProvider) XXX_Merge(src proto.Message) { + xxx_messageInfo_DNSProvider.Merge(m, src) +} +func (m *DNSProvider) XXX_Size() int { + return m.Size() +} +func (m *DNSProvider) XXX_DiscardUnknown() { + xxx_messageInfo_DNSProvider.DiscardUnknown(m) +} + +var xxx_messageInfo_DNSProvider proto.InternalMessageInfo + +func (m *DataVolume) Reset() { *m = DataVolume{} } +func (*DataVolume) ProtoMessage() {} +func (*DataVolume) Descriptor() ([]byte, 
[]int) { + return fileDescriptor_f1caaec5647a9dbf, []int{37} +} +func (m *DataVolume) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DataVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DataVolume) XXX_Merge(src proto.Message) { + xxx_messageInfo_DataVolume.Merge(m, src) +} +func (m *DataVolume) XXX_Size() int { + return m.Size() +} +func (m *DataVolume) XXX_DiscardUnknown() { + xxx_messageInfo_DataVolume.DiscardUnknown(m) +} + +var xxx_messageInfo_DataVolume proto.InternalMessageInfo + +func (m *Endpoint) Reset() { *m = Endpoint{} } +func (*Endpoint) ProtoMessage() {} +func (*Endpoint) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{38} +} +func (m *Endpoint) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Endpoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Endpoint) XXX_Merge(src proto.Message) { + xxx_messageInfo_Endpoint.Merge(m, src) +} +func (m *Endpoint) XXX_Size() int { + return m.Size() +} +func (m *Endpoint) XXX_DiscardUnknown() { + xxx_messageInfo_Endpoint.DiscardUnknown(m) +} + +var xxx_messageInfo_Endpoint proto.InternalMessageInfo + +func (m *ExpirableVersion) Reset() { *m = ExpirableVersion{} } +func (*ExpirableVersion) ProtoMessage() {} +func (*ExpirableVersion) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{39} +} +func (m *ExpirableVersion) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExpirableVersion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExpirableVersion) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExpirableVersion.Merge(m, src) +} +func (m *ExpirableVersion) XXX_Size() int { + return m.Size() +} +func (m *ExpirableVersion) XXX_DiscardUnknown() { + xxx_messageInfo_ExpirableVersion.DiscardUnknown(m) +} + +var xxx_messageInfo_ExpirableVersion proto.InternalMessageInfo + +func (m *Extension) Reset() { *m = Extension{} } +func (*Extension) ProtoMessage() {} +func (*Extension) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{40} +} +func (m *Extension) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Extension) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Extension) XXX_Merge(src proto.Message) { + xxx_messageInfo_Extension.Merge(m, src) +} +func (m *Extension) XXX_Size() int { + return m.Size() +} +func (m *Extension) XXX_DiscardUnknown() { + xxx_messageInfo_Extension.DiscardUnknown(m) +} + +var xxx_messageInfo_Extension proto.InternalMessageInfo + +func (m *ExtensionResourceState) Reset() { *m = ExtensionResourceState{} } +func (*ExtensionResourceState) ProtoMessage() {} +func (*ExtensionResourceState) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{41} +} +func (m *ExtensionResourceState) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExtensionResourceState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := 
m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExtensionResourceState) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtensionResourceState.Merge(m, src) +} +func (m *ExtensionResourceState) XXX_Size() int { + return m.Size() +} +func (m *ExtensionResourceState) XXX_DiscardUnknown() { + xxx_messageInfo_ExtensionResourceState.DiscardUnknown(m) +} + +var xxx_messageInfo_ExtensionResourceState proto.InternalMessageInfo + +func (m *Gardener) Reset() { *m = Gardener{} } +func (*Gardener) ProtoMessage() {} +func (*Gardener) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{42} +} +func (m *Gardener) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Gardener) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Gardener) XXX_Merge(src proto.Message) { + xxx_messageInfo_Gardener.Merge(m, src) +} +func (m *Gardener) XXX_Size() int { + return m.Size() +} +func (m *Gardener) XXX_DiscardUnknown() { + xxx_messageInfo_Gardener.DiscardUnknown(m) +} + +var xxx_messageInfo_Gardener proto.InternalMessageInfo + +func (m *GardenerResourceData) Reset() { *m = GardenerResourceData{} } +func (*GardenerResourceData) ProtoMessage() {} +func (*GardenerResourceData) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{43} +} +func (m *GardenerResourceData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GardenerResourceData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GardenerResourceData) XXX_Merge(src proto.Message) { + xxx_messageInfo_GardenerResourceData.Merge(m, src) +} +func (m *GardenerResourceData) XXX_Size() int { + return m.Size() +} +func (m *GardenerResourceData) XXX_DiscardUnknown() { + xxx_messageInfo_GardenerResourceData.DiscardUnknown(m) +} + +var xxx_messageInfo_GardenerResourceData proto.InternalMessageInfo + +func (m *Hibernation) Reset() { *m = Hibernation{} } +func (*Hibernation) ProtoMessage() {} +func (*Hibernation) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{44} +} +func (m *Hibernation) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Hibernation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Hibernation) XXX_Merge(src proto.Message) { + xxx_messageInfo_Hibernation.Merge(m, src) +} +func (m *Hibernation) XXX_Size() int { + return m.Size() +} +func (m *Hibernation) XXX_DiscardUnknown() { + xxx_messageInfo_Hibernation.DiscardUnknown(m) +} + +var xxx_messageInfo_Hibernation proto.InternalMessageInfo + +func (m *HibernationSchedule) Reset() { *m = HibernationSchedule{} } +func (*HibernationSchedule) ProtoMessage() {} +func (*HibernationSchedule) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{45} +} +func (m *HibernationSchedule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HibernationSchedule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HibernationSchedule) XXX_Merge(src proto.Message) 
{ + xxx_messageInfo_HibernationSchedule.Merge(m, src) +} +func (m *HibernationSchedule) XXX_Size() int { + return m.Size() +} +func (m *HibernationSchedule) XXX_DiscardUnknown() { + xxx_messageInfo_HibernationSchedule.DiscardUnknown(m) +} + +var xxx_messageInfo_HibernationSchedule proto.InternalMessageInfo + +func (m *HorizontalPodAutoscalerConfig) Reset() { *m = HorizontalPodAutoscalerConfig{} } +func (*HorizontalPodAutoscalerConfig) ProtoMessage() {} +func (*HorizontalPodAutoscalerConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{46} +} +func (m *HorizontalPodAutoscalerConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscalerConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscalerConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscalerConfig.Merge(m, src) +} +func (m *HorizontalPodAutoscalerConfig) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscalerConfig) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscalerConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_HorizontalPodAutoscalerConfig proto.InternalMessageInfo + +func (m *Ingress) Reset() { *m = Ingress{} } +func (*Ingress) ProtoMessage() {} +func (*Ingress) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{47} +} +func (m *Ingress) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Ingress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Ingress) XXX_Merge(src proto.Message) { + xxx_messageInfo_Ingress.Merge(m, src) +} +func (m *Ingress) XXX_Size() int { + return m.Size() +} +func (m *Ingress) XXX_DiscardUnknown() { + xxx_messageInfo_Ingress.DiscardUnknown(m) +} + +var xxx_messageInfo_Ingress proto.InternalMessageInfo + +func (m *IngressController) Reset() { *m = IngressController{} } +func (*IngressController) ProtoMessage() {} +func (*IngressController) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{48} +} +func (m *IngressController) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressController) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressController) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressController.Merge(m, src) +} +func (m *IngressController) XXX_Size() int { + return m.Size() +} +func (m *IngressController) XXX_DiscardUnknown() { + xxx_messageInfo_IngressController.DiscardUnknown(m) +} + +var xxx_messageInfo_IngressController proto.InternalMessageInfo + +func (m *KubeAPIServerConfig) Reset() { *m = KubeAPIServerConfig{} } +func (*KubeAPIServerConfig) ProtoMessage() {} +func (*KubeAPIServerConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{49} +} +func (m *KubeAPIServerConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *KubeAPIServerConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *KubeAPIServerConfig) XXX_Merge(src 
proto.Message) { + xxx_messageInfo_KubeAPIServerConfig.Merge(m, src) +} +func (m *KubeAPIServerConfig) XXX_Size() int { + return m.Size() +} +func (m *KubeAPIServerConfig) XXX_DiscardUnknown() { + xxx_messageInfo_KubeAPIServerConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_KubeAPIServerConfig proto.InternalMessageInfo + +func (m *KubeAPIServerRequests) Reset() { *m = KubeAPIServerRequests{} } +func (*KubeAPIServerRequests) ProtoMessage() {} +func (*KubeAPIServerRequests) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{50} +} +func (m *KubeAPIServerRequests) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *KubeAPIServerRequests) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *KubeAPIServerRequests) XXX_Merge(src proto.Message) { + xxx_messageInfo_KubeAPIServerRequests.Merge(m, src) +} +func (m *KubeAPIServerRequests) XXX_Size() int { + return m.Size() +} +func (m *KubeAPIServerRequests) XXX_DiscardUnknown() { + xxx_messageInfo_KubeAPIServerRequests.DiscardUnknown(m) +} + +var xxx_messageInfo_KubeAPIServerRequests proto.InternalMessageInfo + +func (m *KubeControllerManagerConfig) Reset() { *m = KubeControllerManagerConfig{} } +func (*KubeControllerManagerConfig) ProtoMessage() {} +func (*KubeControllerManagerConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{51} +} +func (m *KubeControllerManagerConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *KubeControllerManagerConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *KubeControllerManagerConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_KubeControllerManagerConfig.Merge(m, src) +} +func (m *KubeControllerManagerConfig) XXX_Size() int { + return m.Size() +} +func (m *KubeControllerManagerConfig) XXX_DiscardUnknown() { + xxx_messageInfo_KubeControllerManagerConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_KubeControllerManagerConfig proto.InternalMessageInfo + +func (m *KubeProxyConfig) Reset() { *m = KubeProxyConfig{} } +func (*KubeProxyConfig) ProtoMessage() {} +func (*KubeProxyConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{52} +} +func (m *KubeProxyConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *KubeProxyConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *KubeProxyConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_KubeProxyConfig.Merge(m, src) +} +func (m *KubeProxyConfig) XXX_Size() int { + return m.Size() +} +func (m *KubeProxyConfig) XXX_DiscardUnknown() { + xxx_messageInfo_KubeProxyConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_KubeProxyConfig proto.InternalMessageInfo + +func (m *KubeSchedulerConfig) Reset() { *m = KubeSchedulerConfig{} } +func (*KubeSchedulerConfig) ProtoMessage() {} +func (*KubeSchedulerConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{53} +} +func (m *KubeSchedulerConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *KubeSchedulerConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := 
m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *KubeSchedulerConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_KubeSchedulerConfig.Merge(m, src) +} +func (m *KubeSchedulerConfig) XXX_Size() int { + return m.Size() +} +func (m *KubeSchedulerConfig) XXX_DiscardUnknown() { + xxx_messageInfo_KubeSchedulerConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_KubeSchedulerConfig proto.InternalMessageInfo + +func (m *KubeletConfig) Reset() { *m = KubeletConfig{} } +func (*KubeletConfig) ProtoMessage() {} +func (*KubeletConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{54} +} +func (m *KubeletConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *KubeletConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *KubeletConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_KubeletConfig.Merge(m, src) +} +func (m *KubeletConfig) XXX_Size() int { + return m.Size() +} +func (m *KubeletConfig) XXX_DiscardUnknown() { + xxx_messageInfo_KubeletConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_KubeletConfig proto.InternalMessageInfo + +func (m *KubeletConfigEviction) Reset() { *m = KubeletConfigEviction{} } +func (*KubeletConfigEviction) ProtoMessage() {} +func (*KubeletConfigEviction) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{55} +} +func (m *KubeletConfigEviction) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *KubeletConfigEviction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *KubeletConfigEviction) XXX_Merge(src proto.Message) { + xxx_messageInfo_KubeletConfigEviction.Merge(m, src) +} +func (m *KubeletConfigEviction) XXX_Size() int { + return m.Size() +} +func (m *KubeletConfigEviction) XXX_DiscardUnknown() { + xxx_messageInfo_KubeletConfigEviction.DiscardUnknown(m) +} + +var xxx_messageInfo_KubeletConfigEviction proto.InternalMessageInfo + +func (m *KubeletConfigEvictionMinimumReclaim) Reset() { *m = KubeletConfigEvictionMinimumReclaim{} } +func (*KubeletConfigEvictionMinimumReclaim) ProtoMessage() {} +func (*KubeletConfigEvictionMinimumReclaim) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{56} +} +func (m *KubeletConfigEvictionMinimumReclaim) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *KubeletConfigEvictionMinimumReclaim) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *KubeletConfigEvictionMinimumReclaim) XXX_Merge(src proto.Message) { + xxx_messageInfo_KubeletConfigEvictionMinimumReclaim.Merge(m, src) +} +func (m *KubeletConfigEvictionMinimumReclaim) XXX_Size() int { + return m.Size() +} +func (m *KubeletConfigEvictionMinimumReclaim) XXX_DiscardUnknown() { + xxx_messageInfo_KubeletConfigEvictionMinimumReclaim.DiscardUnknown(m) +} + +var xxx_messageInfo_KubeletConfigEvictionMinimumReclaim proto.InternalMessageInfo + +func (m *KubeletConfigEvictionSoftGracePeriod) Reset() { *m = KubeletConfigEvictionSoftGracePeriod{} } +func (*KubeletConfigEvictionSoftGracePeriod) ProtoMessage() {} +func (*KubeletConfigEvictionSoftGracePeriod) Descriptor() ([]byte, []int) { + 
return fileDescriptor_f1caaec5647a9dbf, []int{57} +} +func (m *KubeletConfigEvictionSoftGracePeriod) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *KubeletConfigEvictionSoftGracePeriod) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *KubeletConfigEvictionSoftGracePeriod) XXX_Merge(src proto.Message) { + xxx_messageInfo_KubeletConfigEvictionSoftGracePeriod.Merge(m, src) +} +func (m *KubeletConfigEvictionSoftGracePeriod) XXX_Size() int { + return m.Size() +} +func (m *KubeletConfigEvictionSoftGracePeriod) XXX_DiscardUnknown() { + xxx_messageInfo_KubeletConfigEvictionSoftGracePeriod.DiscardUnknown(m) +} + +var xxx_messageInfo_KubeletConfigEvictionSoftGracePeriod proto.InternalMessageInfo + +func (m *KubeletConfigReserved) Reset() { *m = KubeletConfigReserved{} } +func (*KubeletConfigReserved) ProtoMessage() {} +func (*KubeletConfigReserved) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{58} +} +func (m *KubeletConfigReserved) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *KubeletConfigReserved) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *KubeletConfigReserved) XXX_Merge(src proto.Message) { + xxx_messageInfo_KubeletConfigReserved.Merge(m, src) +} +func (m *KubeletConfigReserved) XXX_Size() int { + return m.Size() +} +func (m *KubeletConfigReserved) XXX_DiscardUnknown() { + xxx_messageInfo_KubeletConfigReserved.DiscardUnknown(m) +} + +var xxx_messageInfo_KubeletConfigReserved proto.InternalMessageInfo + +func (m *Kubernetes) Reset() { *m = Kubernetes{} } +func (*Kubernetes) ProtoMessage() {} +func (*Kubernetes) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{59} +} +func (m *Kubernetes) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Kubernetes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Kubernetes) XXX_Merge(src proto.Message) { + xxx_messageInfo_Kubernetes.Merge(m, src) +} +func (m *Kubernetes) XXX_Size() int { + return m.Size() +} +func (m *Kubernetes) XXX_DiscardUnknown() { + xxx_messageInfo_Kubernetes.DiscardUnknown(m) +} + +var xxx_messageInfo_Kubernetes proto.InternalMessageInfo + +func (m *KubernetesConfig) Reset() { *m = KubernetesConfig{} } +func (*KubernetesConfig) ProtoMessage() {} +func (*KubernetesConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{60} +} +func (m *KubernetesConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *KubernetesConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *KubernetesConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_KubernetesConfig.Merge(m, src) +} +func (m *KubernetesConfig) XXX_Size() int { + return m.Size() +} +func (m *KubernetesConfig) XXX_DiscardUnknown() { + xxx_messageInfo_KubernetesConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_KubernetesConfig proto.InternalMessageInfo + +func (m *KubernetesDashboard) Reset() { *m = KubernetesDashboard{} } +func (*KubernetesDashboard) ProtoMessage() 
{} +func (*KubernetesDashboard) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{61} +} +func (m *KubernetesDashboard) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *KubernetesDashboard) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *KubernetesDashboard) XXX_Merge(src proto.Message) { + xxx_messageInfo_KubernetesDashboard.Merge(m, src) +} +func (m *KubernetesDashboard) XXX_Size() int { + return m.Size() +} +func (m *KubernetesDashboard) XXX_DiscardUnknown() { + xxx_messageInfo_KubernetesDashboard.DiscardUnknown(m) +} + +var xxx_messageInfo_KubernetesDashboard proto.InternalMessageInfo + +func (m *KubernetesInfo) Reset() { *m = KubernetesInfo{} } +func (*KubernetesInfo) ProtoMessage() {} +func (*KubernetesInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{62} +} +func (m *KubernetesInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *KubernetesInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *KubernetesInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_KubernetesInfo.Merge(m, src) +} +func (m *KubernetesInfo) XXX_Size() int { + return m.Size() +} +func (m *KubernetesInfo) XXX_DiscardUnknown() { + xxx_messageInfo_KubernetesInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_KubernetesInfo proto.InternalMessageInfo + +func (m *KubernetesSettings) Reset() { *m = KubernetesSettings{} } +func (*KubernetesSettings) ProtoMessage() {} +func (*KubernetesSettings) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{63} +} +func (m *KubernetesSettings) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *KubernetesSettings) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *KubernetesSettings) XXX_Merge(src proto.Message) { + xxx_messageInfo_KubernetesSettings.Merge(m, src) +} +func (m *KubernetesSettings) XXX_Size() int { + return m.Size() +} +func (m *KubernetesSettings) XXX_DiscardUnknown() { + xxx_messageInfo_KubernetesSettings.DiscardUnknown(m) +} + +var xxx_messageInfo_KubernetesSettings proto.InternalMessageInfo + +func (m *LastError) Reset() { *m = LastError{} } +func (*LastError) ProtoMessage() {} +func (*LastError) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{64} +} +func (m *LastError) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LastError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LastError) XXX_Merge(src proto.Message) { + xxx_messageInfo_LastError.Merge(m, src) +} +func (m *LastError) XXX_Size() int { + return m.Size() +} +func (m *LastError) XXX_DiscardUnknown() { + xxx_messageInfo_LastError.DiscardUnknown(m) +} + +var xxx_messageInfo_LastError proto.InternalMessageInfo + +func (m *LastOperation) Reset() { *m = LastOperation{} } +func (*LastOperation) ProtoMessage() {} +func (*LastOperation) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{65} +} +func (m *LastOperation) XXX_Unmarshal(b []byte) 
error { + return m.Unmarshal(b) +} +func (m *LastOperation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LastOperation) XXX_Merge(src proto.Message) { + xxx_messageInfo_LastOperation.Merge(m, src) +} +func (m *LastOperation) XXX_Size() int { + return m.Size() +} +func (m *LastOperation) XXX_DiscardUnknown() { + xxx_messageInfo_LastOperation.DiscardUnknown(m) +} + +var xxx_messageInfo_LastOperation proto.InternalMessageInfo + +func (m *Machine) Reset() { *m = Machine{} } +func (*Machine) ProtoMessage() {} +func (*Machine) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{66} +} +func (m *Machine) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Machine) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Machine) XXX_Merge(src proto.Message) { + xxx_messageInfo_Machine.Merge(m, src) +} +func (m *Machine) XXX_Size() int { + return m.Size() +} +func (m *Machine) XXX_DiscardUnknown() { + xxx_messageInfo_Machine.DiscardUnknown(m) +} + +var xxx_messageInfo_Machine proto.InternalMessageInfo + +func (m *MachineControllerManagerSettings) Reset() { *m = MachineControllerManagerSettings{} } +func (*MachineControllerManagerSettings) ProtoMessage() {} +func (*MachineControllerManagerSettings) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{67} +} +func (m *MachineControllerManagerSettings) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MachineControllerManagerSettings) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MachineControllerManagerSettings) XXX_Merge(src proto.Message) { + xxx_messageInfo_MachineControllerManagerSettings.Merge(m, src) +} +func (m *MachineControllerManagerSettings) XXX_Size() int { + return m.Size() +} +func (m *MachineControllerManagerSettings) XXX_DiscardUnknown() { + xxx_messageInfo_MachineControllerManagerSettings.DiscardUnknown(m) +} + +var xxx_messageInfo_MachineControllerManagerSettings proto.InternalMessageInfo + +func (m *MachineImage) Reset() { *m = MachineImage{} } +func (*MachineImage) ProtoMessage() {} +func (*MachineImage) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{68} +} +func (m *MachineImage) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MachineImage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MachineImage) XXX_Merge(src proto.Message) { + xxx_messageInfo_MachineImage.Merge(m, src) +} +func (m *MachineImage) XXX_Size() int { + return m.Size() +} +func (m *MachineImage) XXX_DiscardUnknown() { + xxx_messageInfo_MachineImage.DiscardUnknown(m) +} + +var xxx_messageInfo_MachineImage proto.InternalMessageInfo + +func (m *MachineImageVersion) Reset() { *m = MachineImageVersion{} } +func (*MachineImageVersion) ProtoMessage() {} +func (*MachineImageVersion) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{69} +} +func (m *MachineImageVersion) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m 
*MachineImageVersion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MachineImageVersion) XXX_Merge(src proto.Message) { + xxx_messageInfo_MachineImageVersion.Merge(m, src) +} +func (m *MachineImageVersion) XXX_Size() int { + return m.Size() +} +func (m *MachineImageVersion) XXX_DiscardUnknown() { + xxx_messageInfo_MachineImageVersion.DiscardUnknown(m) +} + +var xxx_messageInfo_MachineImageVersion proto.InternalMessageInfo + +func (m *MachineType) Reset() { *m = MachineType{} } +func (*MachineType) ProtoMessage() {} +func (*MachineType) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{70} +} +func (m *MachineType) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MachineType) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MachineType) XXX_Merge(src proto.Message) { + xxx_messageInfo_MachineType.Merge(m, src) +} +func (m *MachineType) XXX_Size() int { + return m.Size() +} +func (m *MachineType) XXX_DiscardUnknown() { + xxx_messageInfo_MachineType.DiscardUnknown(m) +} + +var xxx_messageInfo_MachineType proto.InternalMessageInfo + +func (m *MachineTypeStorage) Reset() { *m = MachineTypeStorage{} } +func (*MachineTypeStorage) ProtoMessage() {} +func (*MachineTypeStorage) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{71} +} +func (m *MachineTypeStorage) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MachineTypeStorage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MachineTypeStorage) XXX_Merge(src proto.Message) { + xxx_messageInfo_MachineTypeStorage.Merge(m, src) +} +func (m *MachineTypeStorage) XXX_Size() int { + return m.Size() +} +func (m *MachineTypeStorage) XXX_DiscardUnknown() { + xxx_messageInfo_MachineTypeStorage.DiscardUnknown(m) +} + +var xxx_messageInfo_MachineTypeStorage proto.InternalMessageInfo + +func (m *Maintenance) Reset() { *m = Maintenance{} } +func (*Maintenance) ProtoMessage() {} +func (*Maintenance) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{72} +} +func (m *Maintenance) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Maintenance) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Maintenance) XXX_Merge(src proto.Message) { + xxx_messageInfo_Maintenance.Merge(m, src) +} +func (m *Maintenance) XXX_Size() int { + return m.Size() +} +func (m *Maintenance) XXX_DiscardUnknown() { + xxx_messageInfo_Maintenance.DiscardUnknown(m) +} + +var xxx_messageInfo_Maintenance proto.InternalMessageInfo + +func (m *MaintenanceAutoUpdate) Reset() { *m = MaintenanceAutoUpdate{} } +func (*MaintenanceAutoUpdate) ProtoMessage() {} +func (*MaintenanceAutoUpdate) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{73} +} +func (m *MaintenanceAutoUpdate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MaintenanceAutoUpdate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if 
err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MaintenanceAutoUpdate) XXX_Merge(src proto.Message) { + xxx_messageInfo_MaintenanceAutoUpdate.Merge(m, src) +} +func (m *MaintenanceAutoUpdate) XXX_Size() int { + return m.Size() +} +func (m *MaintenanceAutoUpdate) XXX_DiscardUnknown() { + xxx_messageInfo_MaintenanceAutoUpdate.DiscardUnknown(m) +} + +var xxx_messageInfo_MaintenanceAutoUpdate proto.InternalMessageInfo + +func (m *MaintenanceTimeWindow) Reset() { *m = MaintenanceTimeWindow{} } +func (*MaintenanceTimeWindow) ProtoMessage() {} +func (*MaintenanceTimeWindow) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{74} +} +func (m *MaintenanceTimeWindow) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MaintenanceTimeWindow) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MaintenanceTimeWindow) XXX_Merge(src proto.Message) { + xxx_messageInfo_MaintenanceTimeWindow.Merge(m, src) +} +func (m *MaintenanceTimeWindow) XXX_Size() int { + return m.Size() +} +func (m *MaintenanceTimeWindow) XXX_DiscardUnknown() { + xxx_messageInfo_MaintenanceTimeWindow.DiscardUnknown(m) +} + +var xxx_messageInfo_MaintenanceTimeWindow proto.InternalMessageInfo + +func (m *Monitoring) Reset() { *m = Monitoring{} } +func (*Monitoring) ProtoMessage() {} +func (*Monitoring) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{75} +} +func (m *Monitoring) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Monitoring) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Monitoring) XXX_Merge(src proto.Message) { + xxx_messageInfo_Monitoring.Merge(m, src) +} +func (m *Monitoring) XXX_Size() int { + return m.Size() +} +func (m *Monitoring) XXX_DiscardUnknown() { + xxx_messageInfo_Monitoring.DiscardUnknown(m) +} + +var xxx_messageInfo_Monitoring proto.InternalMessageInfo + +func (m *NamedResourceReference) Reset() { *m = NamedResourceReference{} } +func (*NamedResourceReference) ProtoMessage() {} +func (*NamedResourceReference) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{76} +} +func (m *NamedResourceReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NamedResourceReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NamedResourceReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_NamedResourceReference.Merge(m, src) +} +func (m *NamedResourceReference) XXX_Size() int { + return m.Size() +} +func (m *NamedResourceReference) XXX_DiscardUnknown() { + xxx_messageInfo_NamedResourceReference.DiscardUnknown(m) +} + +var xxx_messageInfo_NamedResourceReference proto.InternalMessageInfo + +func (m *Networking) Reset() { *m = Networking{} } +func (*Networking) ProtoMessage() {} +func (*Networking) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{77} +} +func (m *Networking) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Networking) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + 
return b[:n], nil +} +func (m *Networking) XXX_Merge(src proto.Message) { + xxx_messageInfo_Networking.Merge(m, src) +} +func (m *Networking) XXX_Size() int { + return m.Size() +} +func (m *Networking) XXX_DiscardUnknown() { + xxx_messageInfo_Networking.DiscardUnknown(m) +} + +var xxx_messageInfo_Networking proto.InternalMessageInfo + +func (m *NginxIngress) Reset() { *m = NginxIngress{} } +func (*NginxIngress) ProtoMessage() {} +func (*NginxIngress) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{78} +} +func (m *NginxIngress) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NginxIngress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NginxIngress) XXX_Merge(src proto.Message) { + xxx_messageInfo_NginxIngress.Merge(m, src) +} +func (m *NginxIngress) XXX_Size() int { + return m.Size() +} +func (m *NginxIngress) XXX_DiscardUnknown() { + xxx_messageInfo_NginxIngress.DiscardUnknown(m) +} + +var xxx_messageInfo_NginxIngress proto.InternalMessageInfo + +func (m *OIDCConfig) Reset() { *m = OIDCConfig{} } +func (*OIDCConfig) ProtoMessage() {} +func (*OIDCConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{79} +} +func (m *OIDCConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OIDCConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *OIDCConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_OIDCConfig.Merge(m, src) +} +func (m *OIDCConfig) XXX_Size() int { + return m.Size() +} +func (m *OIDCConfig) XXX_DiscardUnknown() { + xxx_messageInfo_OIDCConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_OIDCConfig proto.InternalMessageInfo + +func (m *OpenIDConnectClientAuthentication) Reset() { *m = OpenIDConnectClientAuthentication{} } +func (*OpenIDConnectClientAuthentication) ProtoMessage() {} +func (*OpenIDConnectClientAuthentication) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{80} +} +func (m *OpenIDConnectClientAuthentication) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OpenIDConnectClientAuthentication) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *OpenIDConnectClientAuthentication) XXX_Merge(src proto.Message) { + xxx_messageInfo_OpenIDConnectClientAuthentication.Merge(m, src) +} +func (m *OpenIDConnectClientAuthentication) XXX_Size() int { + return m.Size() +} +func (m *OpenIDConnectClientAuthentication) XXX_DiscardUnknown() { + xxx_messageInfo_OpenIDConnectClientAuthentication.DiscardUnknown(m) +} + +var xxx_messageInfo_OpenIDConnectClientAuthentication proto.InternalMessageInfo + +func (m *Plant) Reset() { *m = Plant{} } +func (*Plant) ProtoMessage() {} +func (*Plant) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{81} +} +func (m *Plant) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Plant) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Plant) XXX_Merge(src proto.Message) { + xxx_messageInfo_Plant.Merge(m, src) +} 
+func (m *Plant) XXX_Size() int { + return m.Size() +} +func (m *Plant) XXX_DiscardUnknown() { + xxx_messageInfo_Plant.DiscardUnknown(m) +} + +var xxx_messageInfo_Plant proto.InternalMessageInfo + +func (m *PlantList) Reset() { *m = PlantList{} } +func (*PlantList) ProtoMessage() {} +func (*PlantList) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{82} +} +func (m *PlantList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PlantList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PlantList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PlantList.Merge(m, src) +} +func (m *PlantList) XXX_Size() int { + return m.Size() +} +func (m *PlantList) XXX_DiscardUnknown() { + xxx_messageInfo_PlantList.DiscardUnknown(m) +} + +var xxx_messageInfo_PlantList proto.InternalMessageInfo + +func (m *PlantSpec) Reset() { *m = PlantSpec{} } +func (*PlantSpec) ProtoMessage() {} +func (*PlantSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{83} +} +func (m *PlantSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PlantSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PlantSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_PlantSpec.Merge(m, src) +} +func (m *PlantSpec) XXX_Size() int { + return m.Size() +} +func (m *PlantSpec) XXX_DiscardUnknown() { + xxx_messageInfo_PlantSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_PlantSpec proto.InternalMessageInfo + +func (m *PlantStatus) Reset() { *m = PlantStatus{} } +func (*PlantStatus) ProtoMessage() {} +func (*PlantStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{84} +} +func (m *PlantStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PlantStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PlantStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_PlantStatus.Merge(m, src) +} +func (m *PlantStatus) XXX_Size() int { + return m.Size() +} +func (m *PlantStatus) XXX_DiscardUnknown() { + xxx_messageInfo_PlantStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_PlantStatus proto.InternalMessageInfo + +func (m *Project) Reset() { *m = Project{} } +func (*Project) ProtoMessage() {} +func (*Project) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{85} +} +func (m *Project) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Project) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Project) XXX_Merge(src proto.Message) { + xxx_messageInfo_Project.Merge(m, src) +} +func (m *Project) XXX_Size() int { + return m.Size() +} +func (m *Project) XXX_DiscardUnknown() { + xxx_messageInfo_Project.DiscardUnknown(m) +} + +var xxx_messageInfo_Project proto.InternalMessageInfo + +func (m *ProjectList) Reset() { *m = ProjectList{} } +func (*ProjectList) ProtoMessage() {} +func (*ProjectList) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{86} +} +func (m *ProjectList) 
XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ProjectList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ProjectList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProjectList.Merge(m, src) +} +func (m *ProjectList) XXX_Size() int { + return m.Size() +} +func (m *ProjectList) XXX_DiscardUnknown() { + xxx_messageInfo_ProjectList.DiscardUnknown(m) +} + +var xxx_messageInfo_ProjectList proto.InternalMessageInfo + +func (m *ProjectMember) Reset() { *m = ProjectMember{} } +func (*ProjectMember) ProtoMessage() {} +func (*ProjectMember) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{87} +} +func (m *ProjectMember) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ProjectMember) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ProjectMember) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProjectMember.Merge(m, src) +} +func (m *ProjectMember) XXX_Size() int { + return m.Size() +} +func (m *ProjectMember) XXX_DiscardUnknown() { + xxx_messageInfo_ProjectMember.DiscardUnknown(m) +} + +var xxx_messageInfo_ProjectMember proto.InternalMessageInfo + +func (m *ProjectSpec) Reset() { *m = ProjectSpec{} } +func (*ProjectSpec) ProtoMessage() {} +func (*ProjectSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{88} +} +func (m *ProjectSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ProjectSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ProjectSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProjectSpec.Merge(m, src) +} +func (m *ProjectSpec) XXX_Size() int { + return m.Size() +} +func (m *ProjectSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ProjectSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ProjectSpec proto.InternalMessageInfo + +func (m *ProjectStatus) Reset() { *m = ProjectStatus{} } +func (*ProjectStatus) ProtoMessage() {} +func (*ProjectStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{89} +} +func (m *ProjectStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ProjectStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ProjectStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProjectStatus.Merge(m, src) +} +func (m *ProjectStatus) XXX_Size() int { + return m.Size() +} +func (m *ProjectStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ProjectStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ProjectStatus proto.InternalMessageInfo + +func (m *ProjectTolerations) Reset() { *m = ProjectTolerations{} } +func (*ProjectTolerations) ProtoMessage() {} +func (*ProjectTolerations) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{90} +} +func (m *ProjectTolerations) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ProjectTolerations) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + 
return b[:n], nil +} +func (m *ProjectTolerations) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProjectTolerations.Merge(m, src) +} +func (m *ProjectTolerations) XXX_Size() int { + return m.Size() +} +func (m *ProjectTolerations) XXX_DiscardUnknown() { + xxx_messageInfo_ProjectTolerations.DiscardUnknown(m) +} + +var xxx_messageInfo_ProjectTolerations proto.InternalMessageInfo + +func (m *Provider) Reset() { *m = Provider{} } +func (*Provider) ProtoMessage() {} +func (*Provider) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{91} +} +func (m *Provider) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Provider) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Provider) XXX_Merge(src proto.Message) { + xxx_messageInfo_Provider.Merge(m, src) +} +func (m *Provider) XXX_Size() int { + return m.Size() +} +func (m *Provider) XXX_DiscardUnknown() { + xxx_messageInfo_Provider.DiscardUnknown(m) +} + +var xxx_messageInfo_Provider proto.InternalMessageInfo + +func (m *Quota) Reset() { *m = Quota{} } +func (*Quota) ProtoMessage() {} +func (*Quota) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{92} +} +func (m *Quota) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Quota) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Quota) XXX_Merge(src proto.Message) { + xxx_messageInfo_Quota.Merge(m, src) +} +func (m *Quota) XXX_Size() int { + return m.Size() +} +func (m *Quota) XXX_DiscardUnknown() { + xxx_messageInfo_Quota.DiscardUnknown(m) +} + +var xxx_messageInfo_Quota proto.InternalMessageInfo + +func (m *QuotaList) Reset() { *m = QuotaList{} } +func (*QuotaList) ProtoMessage() {} +func (*QuotaList) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{93} +} +func (m *QuotaList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QuotaList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *QuotaList) XXX_Merge(src proto.Message) { + xxx_messageInfo_QuotaList.Merge(m, src) +} +func (m *QuotaList) XXX_Size() int { + return m.Size() +} +func (m *QuotaList) XXX_DiscardUnknown() { + xxx_messageInfo_QuotaList.DiscardUnknown(m) +} + +var xxx_messageInfo_QuotaList proto.InternalMessageInfo + +func (m *QuotaSpec) Reset() { *m = QuotaSpec{} } +func (*QuotaSpec) ProtoMessage() {} +func (*QuotaSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{94} +} +func (m *QuotaSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QuotaSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *QuotaSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_QuotaSpec.Merge(m, src) +} +func (m *QuotaSpec) XXX_Size() int { + return m.Size() +} +func (m *QuotaSpec) XXX_DiscardUnknown() { + xxx_messageInfo_QuotaSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_QuotaSpec proto.InternalMessageInfo + +func (m *Region) Reset() { *m = Region{} } +func (*Region) ProtoMessage() {} +func (*Region) 
Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{95} +} +func (m *Region) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Region) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Region) XXX_Merge(src proto.Message) { + xxx_messageInfo_Region.Merge(m, src) +} +func (m *Region) XXX_Size() int { + return m.Size() +} +func (m *Region) XXX_DiscardUnknown() { + xxx_messageInfo_Region.DiscardUnknown(m) +} + +var xxx_messageInfo_Region proto.InternalMessageInfo + +func (m *ResourceData) Reset() { *m = ResourceData{} } +func (*ResourceData) ProtoMessage() {} +func (*ResourceData) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{96} +} +func (m *ResourceData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceData) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceData.Merge(m, src) +} +func (m *ResourceData) XXX_Size() int { + return m.Size() +} +func (m *ResourceData) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceData.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceData proto.InternalMessageInfo + +func (m *ResourceWatchCacheSize) Reset() { *m = ResourceWatchCacheSize{} } +func (*ResourceWatchCacheSize) ProtoMessage() {} +func (*ResourceWatchCacheSize) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{97} +} +func (m *ResourceWatchCacheSize) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceWatchCacheSize) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceWatchCacheSize) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceWatchCacheSize.Merge(m, src) +} +func (m *ResourceWatchCacheSize) XXX_Size() int { + return m.Size() +} +func (m *ResourceWatchCacheSize) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceWatchCacheSize.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceWatchCacheSize proto.InternalMessageInfo + +func (m *SecretBinding) Reset() { *m = SecretBinding{} } +func (*SecretBinding) ProtoMessage() {} +func (*SecretBinding) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{98} +} +func (m *SecretBinding) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SecretBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SecretBinding) XXX_Merge(src proto.Message) { + xxx_messageInfo_SecretBinding.Merge(m, src) +} +func (m *SecretBinding) XXX_Size() int { + return m.Size() +} +func (m *SecretBinding) XXX_DiscardUnknown() { + xxx_messageInfo_SecretBinding.DiscardUnknown(m) +} + +var xxx_messageInfo_SecretBinding proto.InternalMessageInfo + +func (m *SecretBindingList) Reset() { *m = SecretBindingList{} } +func (*SecretBindingList) ProtoMessage() {} +func (*SecretBindingList) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{99} +} +func (m *SecretBindingList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m 
*SecretBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SecretBindingList) XXX_Merge(src proto.Message) { + xxx_messageInfo_SecretBindingList.Merge(m, src) +} +func (m *SecretBindingList) XXX_Size() int { + return m.Size() +} +func (m *SecretBindingList) XXX_DiscardUnknown() { + xxx_messageInfo_SecretBindingList.DiscardUnknown(m) +} + +var xxx_messageInfo_SecretBindingList proto.InternalMessageInfo + +func (m *Seed) Reset() { *m = Seed{} } +func (*Seed) ProtoMessage() {} +func (*Seed) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{100} +} +func (m *Seed) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Seed) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Seed) XXX_Merge(src proto.Message) { + xxx_messageInfo_Seed.Merge(m, src) +} +func (m *Seed) XXX_Size() int { + return m.Size() +} +func (m *Seed) XXX_DiscardUnknown() { + xxx_messageInfo_Seed.DiscardUnknown(m) +} + +var xxx_messageInfo_Seed proto.InternalMessageInfo + +func (m *SeedBackup) Reset() { *m = SeedBackup{} } +func (*SeedBackup) ProtoMessage() {} +func (*SeedBackup) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{101} +} +func (m *SeedBackup) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SeedBackup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SeedBackup) XXX_Merge(src proto.Message) { + xxx_messageInfo_SeedBackup.Merge(m, src) +} +func (m *SeedBackup) XXX_Size() int { + return m.Size() +} +func (m *SeedBackup) XXX_DiscardUnknown() { + xxx_messageInfo_SeedBackup.DiscardUnknown(m) +} + +var xxx_messageInfo_SeedBackup proto.InternalMessageInfo + +func (m *SeedDNS) Reset() { *m = SeedDNS{} } +func (*SeedDNS) ProtoMessage() {} +func (*SeedDNS) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{102} +} +func (m *SeedDNS) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SeedDNS) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SeedDNS) XXX_Merge(src proto.Message) { + xxx_messageInfo_SeedDNS.Merge(m, src) +} +func (m *SeedDNS) XXX_Size() int { + return m.Size() +} +func (m *SeedDNS) XXX_DiscardUnknown() { + xxx_messageInfo_SeedDNS.DiscardUnknown(m) +} + +var xxx_messageInfo_SeedDNS proto.InternalMessageInfo + +func (m *SeedDNSProvider) Reset() { *m = SeedDNSProvider{} } +func (*SeedDNSProvider) ProtoMessage() {} +func (*SeedDNSProvider) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{103} +} +func (m *SeedDNSProvider) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SeedDNSProvider) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SeedDNSProvider) XXX_Merge(src proto.Message) { + xxx_messageInfo_SeedDNSProvider.Merge(m, src) +} +func (m *SeedDNSProvider) XXX_Size() int { + return m.Size() +} +func (m *SeedDNSProvider) 
XXX_DiscardUnknown() { + xxx_messageInfo_SeedDNSProvider.DiscardUnknown(m) +} + +var xxx_messageInfo_SeedDNSProvider proto.InternalMessageInfo + +func (m *SeedList) Reset() { *m = SeedList{} } +func (*SeedList) ProtoMessage() {} +func (*SeedList) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{104} +} +func (m *SeedList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SeedList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SeedList) XXX_Merge(src proto.Message) { + xxx_messageInfo_SeedList.Merge(m, src) +} +func (m *SeedList) XXX_Size() int { + return m.Size() +} +func (m *SeedList) XXX_DiscardUnknown() { + xxx_messageInfo_SeedList.DiscardUnknown(m) +} + +var xxx_messageInfo_SeedList proto.InternalMessageInfo + +func (m *SeedNetworks) Reset() { *m = SeedNetworks{} } +func (*SeedNetworks) ProtoMessage() {} +func (*SeedNetworks) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{105} +} +func (m *SeedNetworks) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SeedNetworks) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SeedNetworks) XXX_Merge(src proto.Message) { + xxx_messageInfo_SeedNetworks.Merge(m, src) +} +func (m *SeedNetworks) XXX_Size() int { + return m.Size() +} +func (m *SeedNetworks) XXX_DiscardUnknown() { + xxx_messageInfo_SeedNetworks.DiscardUnknown(m) +} + +var xxx_messageInfo_SeedNetworks proto.InternalMessageInfo + +func (m *SeedProvider) Reset() { *m = SeedProvider{} } +func (*SeedProvider) ProtoMessage() {} +func (*SeedProvider) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{106} +} +func (m *SeedProvider) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SeedProvider) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SeedProvider) XXX_Merge(src proto.Message) { + xxx_messageInfo_SeedProvider.Merge(m, src) +} +func (m *SeedProvider) XXX_Size() int { + return m.Size() +} +func (m *SeedProvider) XXX_DiscardUnknown() { + xxx_messageInfo_SeedProvider.DiscardUnknown(m) +} + +var xxx_messageInfo_SeedProvider proto.InternalMessageInfo + +func (m *SeedSelector) Reset() { *m = SeedSelector{} } +func (*SeedSelector) ProtoMessage() {} +func (*SeedSelector) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{107} +} +func (m *SeedSelector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SeedSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SeedSelector) XXX_Merge(src proto.Message) { + xxx_messageInfo_SeedSelector.Merge(m, src) +} +func (m *SeedSelector) XXX_Size() int { + return m.Size() +} +func (m *SeedSelector) XXX_DiscardUnknown() { + xxx_messageInfo_SeedSelector.DiscardUnknown(m) +} + +var xxx_messageInfo_SeedSelector proto.InternalMessageInfo + +func (m *SeedSettingExcessCapacityReservation) Reset() { *m = SeedSettingExcessCapacityReservation{} } +func (*SeedSettingExcessCapacityReservation) ProtoMessage() {} +func 
(*SeedSettingExcessCapacityReservation) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{108} +} +func (m *SeedSettingExcessCapacityReservation) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SeedSettingExcessCapacityReservation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SeedSettingExcessCapacityReservation) XXX_Merge(src proto.Message) { + xxx_messageInfo_SeedSettingExcessCapacityReservation.Merge(m, src) +} +func (m *SeedSettingExcessCapacityReservation) XXX_Size() int { + return m.Size() +} +func (m *SeedSettingExcessCapacityReservation) XXX_DiscardUnknown() { + xxx_messageInfo_SeedSettingExcessCapacityReservation.DiscardUnknown(m) +} + +var xxx_messageInfo_SeedSettingExcessCapacityReservation proto.InternalMessageInfo + +func (m *SeedSettingLoadBalancerServices) Reset() { *m = SeedSettingLoadBalancerServices{} } +func (*SeedSettingLoadBalancerServices) ProtoMessage() {} +func (*SeedSettingLoadBalancerServices) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{109} +} +func (m *SeedSettingLoadBalancerServices) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SeedSettingLoadBalancerServices) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SeedSettingLoadBalancerServices) XXX_Merge(src proto.Message) { + xxx_messageInfo_SeedSettingLoadBalancerServices.Merge(m, src) +} +func (m *SeedSettingLoadBalancerServices) XXX_Size() int { + return m.Size() +} +func (m *SeedSettingLoadBalancerServices) XXX_DiscardUnknown() { + xxx_messageInfo_SeedSettingLoadBalancerServices.DiscardUnknown(m) +} + +var xxx_messageInfo_SeedSettingLoadBalancerServices proto.InternalMessageInfo + +func (m *SeedSettingScheduling) Reset() { *m = SeedSettingScheduling{} } +func (*SeedSettingScheduling) ProtoMessage() {} +func (*SeedSettingScheduling) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{110} +} +func (m *SeedSettingScheduling) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SeedSettingScheduling) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SeedSettingScheduling) XXX_Merge(src proto.Message) { + xxx_messageInfo_SeedSettingScheduling.Merge(m, src) +} +func (m *SeedSettingScheduling) XXX_Size() int { + return m.Size() +} +func (m *SeedSettingScheduling) XXX_DiscardUnknown() { + xxx_messageInfo_SeedSettingScheduling.DiscardUnknown(m) +} + +var xxx_messageInfo_SeedSettingScheduling proto.InternalMessageInfo + +func (m *SeedSettingShootDNS) Reset() { *m = SeedSettingShootDNS{} } +func (*SeedSettingShootDNS) ProtoMessage() {} +func (*SeedSettingShootDNS) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{111} +} +func (m *SeedSettingShootDNS) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SeedSettingShootDNS) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SeedSettingShootDNS) XXX_Merge(src proto.Message) { + xxx_messageInfo_SeedSettingShootDNS.Merge(m, src) +} 
+func (m *SeedSettingShootDNS) XXX_Size() int { + return m.Size() +} +func (m *SeedSettingShootDNS) XXX_DiscardUnknown() { + xxx_messageInfo_SeedSettingShootDNS.DiscardUnknown(m) +} + +var xxx_messageInfo_SeedSettingShootDNS proto.InternalMessageInfo + +func (m *SeedSettingVerticalPodAutoscaler) Reset() { *m = SeedSettingVerticalPodAutoscaler{} } +func (*SeedSettingVerticalPodAutoscaler) ProtoMessage() {} +func (*SeedSettingVerticalPodAutoscaler) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{112} +} +func (m *SeedSettingVerticalPodAutoscaler) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SeedSettingVerticalPodAutoscaler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SeedSettingVerticalPodAutoscaler) XXX_Merge(src proto.Message) { + xxx_messageInfo_SeedSettingVerticalPodAutoscaler.Merge(m, src) +} +func (m *SeedSettingVerticalPodAutoscaler) XXX_Size() int { + return m.Size() +} +func (m *SeedSettingVerticalPodAutoscaler) XXX_DiscardUnknown() { + xxx_messageInfo_SeedSettingVerticalPodAutoscaler.DiscardUnknown(m) +} + +var xxx_messageInfo_SeedSettingVerticalPodAutoscaler proto.InternalMessageInfo + +func (m *SeedSettings) Reset() { *m = SeedSettings{} } +func (*SeedSettings) ProtoMessage() {} +func (*SeedSettings) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{113} +} +func (m *SeedSettings) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SeedSettings) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SeedSettings) XXX_Merge(src proto.Message) { + xxx_messageInfo_SeedSettings.Merge(m, src) +} +func (m *SeedSettings) XXX_Size() int { + return m.Size() +} +func (m *SeedSettings) XXX_DiscardUnknown() { + xxx_messageInfo_SeedSettings.DiscardUnknown(m) +} + +var xxx_messageInfo_SeedSettings proto.InternalMessageInfo + +func (m *SeedSpec) Reset() { *m = SeedSpec{} } +func (*SeedSpec) ProtoMessage() {} +func (*SeedSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{114} +} +func (m *SeedSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SeedSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SeedSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_SeedSpec.Merge(m, src) +} +func (m *SeedSpec) XXX_Size() int { + return m.Size() +} +func (m *SeedSpec) XXX_DiscardUnknown() { + xxx_messageInfo_SeedSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_SeedSpec proto.InternalMessageInfo + +func (m *SeedStatus) Reset() { *m = SeedStatus{} } +func (*SeedStatus) ProtoMessage() {} +func (*SeedStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{115} +} +func (m *SeedStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SeedStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SeedStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_SeedStatus.Merge(m, src) +} +func (m *SeedStatus) XXX_Size() int { + return m.Size() +} +func (m 
*SeedStatus) XXX_DiscardUnknown() { + xxx_messageInfo_SeedStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_SeedStatus proto.InternalMessageInfo + +func (m *SeedTaint) Reset() { *m = SeedTaint{} } +func (*SeedTaint) ProtoMessage() {} +func (*SeedTaint) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{116} +} +func (m *SeedTaint) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SeedTaint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SeedTaint) XXX_Merge(src proto.Message) { + xxx_messageInfo_SeedTaint.Merge(m, src) +} +func (m *SeedTaint) XXX_Size() int { + return m.Size() +} +func (m *SeedTaint) XXX_DiscardUnknown() { + xxx_messageInfo_SeedTaint.DiscardUnknown(m) +} + +var xxx_messageInfo_SeedTaint proto.InternalMessageInfo + +func (m *SeedVolume) Reset() { *m = SeedVolume{} } +func (*SeedVolume) ProtoMessage() {} +func (*SeedVolume) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{117} +} +func (m *SeedVolume) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SeedVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SeedVolume) XXX_Merge(src proto.Message) { + xxx_messageInfo_SeedVolume.Merge(m, src) +} +func (m *SeedVolume) XXX_Size() int { + return m.Size() +} +func (m *SeedVolume) XXX_DiscardUnknown() { + xxx_messageInfo_SeedVolume.DiscardUnknown(m) +} + +var xxx_messageInfo_SeedVolume proto.InternalMessageInfo + +func (m *SeedVolumeProvider) Reset() { *m = SeedVolumeProvider{} } +func (*SeedVolumeProvider) ProtoMessage() {} +func (*SeedVolumeProvider) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{118} +} +func (m *SeedVolumeProvider) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SeedVolumeProvider) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SeedVolumeProvider) XXX_Merge(src proto.Message) { + xxx_messageInfo_SeedVolumeProvider.Merge(m, src) +} +func (m *SeedVolumeProvider) XXX_Size() int { + return m.Size() +} +func (m *SeedVolumeProvider) XXX_DiscardUnknown() { + xxx_messageInfo_SeedVolumeProvider.DiscardUnknown(m) +} + +var xxx_messageInfo_SeedVolumeProvider proto.InternalMessageInfo + +func (m *ServiceAccountConfig) Reset() { *m = ServiceAccountConfig{} } +func (*ServiceAccountConfig) ProtoMessage() {} +func (*ServiceAccountConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{119} +} +func (m *ServiceAccountConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceAccountConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServiceAccountConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceAccountConfig.Merge(m, src) +} +func (m *ServiceAccountConfig) XXX_Size() int { + return m.Size() +} +func (m *ServiceAccountConfig) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceAccountConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceAccountConfig proto.InternalMessageInfo + +func (m *Shoot) Reset() { 
*m = Shoot{} } +func (*Shoot) ProtoMessage() {} +func (*Shoot) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{120} +} +func (m *Shoot) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Shoot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Shoot) XXX_Merge(src proto.Message) { + xxx_messageInfo_Shoot.Merge(m, src) +} +func (m *Shoot) XXX_Size() int { + return m.Size() +} +func (m *Shoot) XXX_DiscardUnknown() { + xxx_messageInfo_Shoot.DiscardUnknown(m) +} + +var xxx_messageInfo_Shoot proto.InternalMessageInfo + +func (m *ShootList) Reset() { *m = ShootList{} } +func (*ShootList) ProtoMessage() {} +func (*ShootList) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{121} +} +func (m *ShootList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ShootList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ShootList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ShootList.Merge(m, src) +} +func (m *ShootList) XXX_Size() int { + return m.Size() +} +func (m *ShootList) XXX_DiscardUnknown() { + xxx_messageInfo_ShootList.DiscardUnknown(m) +} + +var xxx_messageInfo_ShootList proto.InternalMessageInfo + +func (m *ShootMachineImage) Reset() { *m = ShootMachineImage{} } +func (*ShootMachineImage) ProtoMessage() {} +func (*ShootMachineImage) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{122} +} +func (m *ShootMachineImage) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ShootMachineImage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ShootMachineImage) XXX_Merge(src proto.Message) { + xxx_messageInfo_ShootMachineImage.Merge(m, src) +} +func (m *ShootMachineImage) XXX_Size() int { + return m.Size() +} +func (m *ShootMachineImage) XXX_DiscardUnknown() { + xxx_messageInfo_ShootMachineImage.DiscardUnknown(m) +} + +var xxx_messageInfo_ShootMachineImage proto.InternalMessageInfo + +func (m *ShootNetworks) Reset() { *m = ShootNetworks{} } +func (*ShootNetworks) ProtoMessage() {} +func (*ShootNetworks) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{123} +} +func (m *ShootNetworks) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ShootNetworks) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ShootNetworks) XXX_Merge(src proto.Message) { + xxx_messageInfo_ShootNetworks.Merge(m, src) +} +func (m *ShootNetworks) XXX_Size() int { + return m.Size() +} +func (m *ShootNetworks) XXX_DiscardUnknown() { + xxx_messageInfo_ShootNetworks.DiscardUnknown(m) +} + +var xxx_messageInfo_ShootNetworks proto.InternalMessageInfo + +func (m *ShootSpec) Reset() { *m = ShootSpec{} } +func (*ShootSpec) ProtoMessage() {} +func (*ShootSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{124} +} +func (m *ShootSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ShootSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b 
= b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ShootSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ShootSpec.Merge(m, src) +} +func (m *ShootSpec) XXX_Size() int { + return m.Size() +} +func (m *ShootSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ShootSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ShootSpec proto.InternalMessageInfo + +func (m *ShootState) Reset() { *m = ShootState{} } +func (*ShootState) ProtoMessage() {} +func (*ShootState) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{125} +} +func (m *ShootState) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ShootState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ShootState) XXX_Merge(src proto.Message) { + xxx_messageInfo_ShootState.Merge(m, src) +} +func (m *ShootState) XXX_Size() int { + return m.Size() +} +func (m *ShootState) XXX_DiscardUnknown() { + xxx_messageInfo_ShootState.DiscardUnknown(m) +} + +var xxx_messageInfo_ShootState proto.InternalMessageInfo + +func (m *ShootStateList) Reset() { *m = ShootStateList{} } +func (*ShootStateList) ProtoMessage() {} +func (*ShootStateList) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{126} +} +func (m *ShootStateList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ShootStateList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ShootStateList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ShootStateList.Merge(m, src) +} +func (m *ShootStateList) XXX_Size() int { + return m.Size() +} +func (m *ShootStateList) XXX_DiscardUnknown() { + xxx_messageInfo_ShootStateList.DiscardUnknown(m) +} + +var xxx_messageInfo_ShootStateList proto.InternalMessageInfo + +func (m *ShootStateSpec) Reset() { *m = ShootStateSpec{} } +func (*ShootStateSpec) ProtoMessage() {} +func (*ShootStateSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{127} +} +func (m *ShootStateSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ShootStateSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ShootStateSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ShootStateSpec.Merge(m, src) +} +func (m *ShootStateSpec) XXX_Size() int { + return m.Size() +} +func (m *ShootStateSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ShootStateSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ShootStateSpec proto.InternalMessageInfo + +func (m *ShootStatus) Reset() { *m = ShootStatus{} } +func (*ShootStatus) ProtoMessage() {} +func (*ShootStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{128} +} +func (m *ShootStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ShootStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ShootStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ShootStatus.Merge(m, src) +} +func (m *ShootStatus) XXX_Size() int { + return m.Size() +} +func 
(m *ShootStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ShootStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ShootStatus proto.InternalMessageInfo + +func (m *Toleration) Reset() { *m = Toleration{} } +func (*Toleration) ProtoMessage() {} +func (*Toleration) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{129} +} +func (m *Toleration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Toleration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Toleration) XXX_Merge(src proto.Message) { + xxx_messageInfo_Toleration.Merge(m, src) +} +func (m *Toleration) XXX_Size() int { + return m.Size() +} +func (m *Toleration) XXX_DiscardUnknown() { + xxx_messageInfo_Toleration.DiscardUnknown(m) +} + +var xxx_messageInfo_Toleration proto.InternalMessageInfo + +func (m *VerticalPodAutoscaler) Reset() { *m = VerticalPodAutoscaler{} } +func (*VerticalPodAutoscaler) ProtoMessage() {} +func (*VerticalPodAutoscaler) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{130} +} +func (m *VerticalPodAutoscaler) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VerticalPodAutoscaler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VerticalPodAutoscaler) XXX_Merge(src proto.Message) { + xxx_messageInfo_VerticalPodAutoscaler.Merge(m, src) +} +func (m *VerticalPodAutoscaler) XXX_Size() int { + return m.Size() +} +func (m *VerticalPodAutoscaler) XXX_DiscardUnknown() { + xxx_messageInfo_VerticalPodAutoscaler.DiscardUnknown(m) +} + +var xxx_messageInfo_VerticalPodAutoscaler proto.InternalMessageInfo + +func (m *Volume) Reset() { *m = Volume{} } +func (*Volume) ProtoMessage() {} +func (*Volume) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{131} +} +func (m *Volume) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Volume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Volume) XXX_Merge(src proto.Message) { + xxx_messageInfo_Volume.Merge(m, src) +} +func (m *Volume) XXX_Size() int { + return m.Size() +} +func (m *Volume) XXX_DiscardUnknown() { + xxx_messageInfo_Volume.DiscardUnknown(m) +} + +var xxx_messageInfo_Volume proto.InternalMessageInfo + +func (m *VolumeType) Reset() { *m = VolumeType{} } +func (*VolumeType) ProtoMessage() {} +func (*VolumeType) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{132} +} +func (m *VolumeType) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeType) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeType) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeType.Merge(m, src) +} +func (m *VolumeType) XXX_Size() int { + return m.Size() +} +func (m *VolumeType) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeType.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeType proto.InternalMessageInfo + +func (m *WatchCacheSizes) Reset() { *m = WatchCacheSizes{} } +func (*WatchCacheSizes) ProtoMessage() {} +func (*WatchCacheSizes) 
Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{133} +} +func (m *WatchCacheSizes) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WatchCacheSizes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *WatchCacheSizes) XXX_Merge(src proto.Message) { + xxx_messageInfo_WatchCacheSizes.Merge(m, src) +} +func (m *WatchCacheSizes) XXX_Size() int { + return m.Size() +} +func (m *WatchCacheSizes) XXX_DiscardUnknown() { + xxx_messageInfo_WatchCacheSizes.DiscardUnknown(m) +} + +var xxx_messageInfo_WatchCacheSizes proto.InternalMessageInfo + +func (m *Worker) Reset() { *m = Worker{} } +func (*Worker) ProtoMessage() {} +func (*Worker) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{134} +} +func (m *Worker) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Worker) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Worker) XXX_Merge(src proto.Message) { + xxx_messageInfo_Worker.Merge(m, src) +} +func (m *Worker) XXX_Size() int { + return m.Size() +} +func (m *Worker) XXX_DiscardUnknown() { + xxx_messageInfo_Worker.DiscardUnknown(m) +} + +var xxx_messageInfo_Worker proto.InternalMessageInfo + +func (m *WorkerKubernetes) Reset() { *m = WorkerKubernetes{} } +func (*WorkerKubernetes) ProtoMessage() {} +func (*WorkerKubernetes) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{135} +} +func (m *WorkerKubernetes) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WorkerKubernetes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *WorkerKubernetes) XXX_Merge(src proto.Message) { + xxx_messageInfo_WorkerKubernetes.Merge(m, src) +} +func (m *WorkerKubernetes) XXX_Size() int { + return m.Size() +} +func (m *WorkerKubernetes) XXX_DiscardUnknown() { + xxx_messageInfo_WorkerKubernetes.DiscardUnknown(m) +} + +var xxx_messageInfo_WorkerKubernetes proto.InternalMessageInfo + +func (m *WorkerSystemComponents) Reset() { *m = WorkerSystemComponents{} } +func (*WorkerSystemComponents) ProtoMessage() {} +func (*WorkerSystemComponents) Descriptor() ([]byte, []int) { + return fileDescriptor_f1caaec5647a9dbf, []int{136} +} +func (m *WorkerSystemComponents) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WorkerSystemComponents) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *WorkerSystemComponents) XXX_Merge(src proto.Message) { + xxx_messageInfo_WorkerSystemComponents.Merge(m, src) +} +func (m *WorkerSystemComponents) XXX_Size() int { + return m.Size() +} +func (m *WorkerSystemComponents) XXX_DiscardUnknown() { + xxx_messageInfo_WorkerSystemComponents.DiscardUnknown(m) +} + +var xxx_messageInfo_WorkerSystemComponents proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Addon)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.Addon") + proto.RegisterType((*Addons)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.Addons") + proto.RegisterType((*AdmissionPlugin)(nil), 
"github.com.gardener.gardener.pkg.apis.core.v1alpha1.AdmissionPlugin") + proto.RegisterType((*Alerting)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.Alerting") + proto.RegisterType((*AuditConfig)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.AuditConfig") + proto.RegisterType((*AuditPolicy)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.AuditPolicy") + proto.RegisterType((*AvailabilityZone)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.AvailabilityZone") + proto.RegisterType((*BackupBucket)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.BackupBucket") + proto.RegisterType((*BackupBucketList)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.BackupBucketList") + proto.RegisterType((*BackupBucketProvider)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.BackupBucketProvider") + proto.RegisterType((*BackupBucketSpec)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.BackupBucketSpec") + proto.RegisterType((*BackupBucketStatus)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.BackupBucketStatus") + proto.RegisterType((*BackupEntry)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.BackupEntry") + proto.RegisterType((*BackupEntryList)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.BackupEntryList") + proto.RegisterType((*BackupEntrySpec)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.BackupEntrySpec") + proto.RegisterType((*BackupEntryStatus)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.BackupEntryStatus") + proto.RegisterType((*CRI)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.CRI") + proto.RegisterType((*CloudInfo)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.CloudInfo") + proto.RegisterType((*CloudProfile)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.CloudProfile") + proto.RegisterType((*CloudProfileList)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.CloudProfileList") + proto.RegisterType((*CloudProfileSpec)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.CloudProfileSpec") + proto.RegisterType((*ClusterAutoscaler)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.ClusterAutoscaler") + proto.RegisterType((*ClusterInfo)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.ClusterInfo") + proto.RegisterType((*Condition)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.Condition") + proto.RegisterType((*ContainerRuntime)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.ContainerRuntime") + proto.RegisterType((*ControllerDeployment)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.ControllerDeployment") + proto.RegisterType((*ControllerInstallation)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.ControllerInstallation") + proto.RegisterType((*ControllerInstallationList)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.ControllerInstallationList") + proto.RegisterType((*ControllerInstallationSpec)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.ControllerInstallationSpec") + proto.RegisterType((*ControllerInstallationStatus)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.ControllerInstallationStatus") + proto.RegisterType((*ControllerRegistration)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.ControllerRegistration") + proto.RegisterType((*ControllerRegistrationList)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.ControllerRegistrationList") + 
proto.RegisterType((*ControllerRegistrationSpec)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.ControllerRegistrationSpec") + proto.RegisterType((*ControllerResource)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.ControllerResource") + proto.RegisterType((*DNS)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.DNS") + proto.RegisterType((*DNSIncludeExclude)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.DNSIncludeExclude") + proto.RegisterType((*DNSProvider)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.DNSProvider") + proto.RegisterType((*DataVolume)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.DataVolume") + proto.RegisterType((*Endpoint)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.Endpoint") + proto.RegisterType((*ExpirableVersion)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.ExpirableVersion") + proto.RegisterType((*Extension)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.Extension") + proto.RegisterType((*ExtensionResourceState)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.ExtensionResourceState") + proto.RegisterType((*Gardener)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.Gardener") + proto.RegisterType((*GardenerResourceData)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.GardenerResourceData") + proto.RegisterType((*Hibernation)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.Hibernation") + proto.RegisterType((*HibernationSchedule)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.HibernationSchedule") + proto.RegisterType((*HorizontalPodAutoscalerConfig)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.HorizontalPodAutoscalerConfig") + proto.RegisterType((*Ingress)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.Ingress") + proto.RegisterType((*IngressController)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.IngressController") + proto.RegisterType((*KubeAPIServerConfig)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.KubeAPIServerConfig") + proto.RegisterMapType((map[string]bool)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.KubeAPIServerConfig.RuntimeConfigEntry") + proto.RegisterType((*KubeAPIServerRequests)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.KubeAPIServerRequests") + proto.RegisterType((*KubeControllerManagerConfig)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.KubeControllerManagerConfig") + proto.RegisterType((*KubeProxyConfig)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.KubeProxyConfig") + proto.RegisterType((*KubeSchedulerConfig)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.KubeSchedulerConfig") + proto.RegisterType((*KubeletConfig)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.KubeletConfig") + proto.RegisterType((*KubeletConfigEviction)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.KubeletConfigEviction") + proto.RegisterType((*KubeletConfigEvictionMinimumReclaim)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.KubeletConfigEvictionMinimumReclaim") + proto.RegisterType((*KubeletConfigEvictionSoftGracePeriod)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.KubeletConfigEvictionSoftGracePeriod") + proto.RegisterType((*KubeletConfigReserved)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.KubeletConfigReserved") + proto.RegisterType((*Kubernetes)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.Kubernetes") + 
proto.RegisterType((*KubernetesConfig)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.KubernetesConfig") + proto.RegisterMapType((map[string]bool)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.KubernetesConfig.FeatureGatesEntry") + proto.RegisterType((*KubernetesDashboard)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.KubernetesDashboard") + proto.RegisterType((*KubernetesInfo)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.KubernetesInfo") + proto.RegisterType((*KubernetesSettings)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.KubernetesSettings") + proto.RegisterType((*LastError)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.LastError") + proto.RegisterType((*LastOperation)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.LastOperation") + proto.RegisterType((*Machine)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.Machine") + proto.RegisterType((*MachineControllerManagerSettings)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.MachineControllerManagerSettings") + proto.RegisterType((*MachineImage)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.MachineImage") + proto.RegisterType((*MachineImageVersion)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.MachineImageVersion") + proto.RegisterType((*MachineType)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.MachineType") + proto.RegisterType((*MachineTypeStorage)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.MachineTypeStorage") + proto.RegisterType((*Maintenance)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.Maintenance") + proto.RegisterType((*MaintenanceAutoUpdate)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.MaintenanceAutoUpdate") + proto.RegisterType((*MaintenanceTimeWindow)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.MaintenanceTimeWindow") + proto.RegisterType((*Monitoring)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.Monitoring") + proto.RegisterType((*NamedResourceReference)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.NamedResourceReference") + proto.RegisterType((*Networking)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.Networking") + proto.RegisterType((*NginxIngress)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.NginxIngress") + proto.RegisterMapType((map[string]string)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.NginxIngress.ConfigEntry") + proto.RegisterType((*OIDCConfig)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.OIDCConfig") + proto.RegisterMapType((map[string]string)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.OIDCConfig.RequiredClaimsEntry") + proto.RegisterType((*OpenIDConnectClientAuthentication)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.OpenIDConnectClientAuthentication") + proto.RegisterMapType((map[string]string)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.OpenIDConnectClientAuthentication.ExtraConfigEntry") + proto.RegisterType((*Plant)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.Plant") + proto.RegisterType((*PlantList)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.PlantList") + proto.RegisterType((*PlantSpec)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.PlantSpec") + proto.RegisterType((*PlantStatus)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.PlantStatus") + proto.RegisterType((*Project)(nil), 
"github.com.gardener.gardener.pkg.apis.core.v1alpha1.Project") + proto.RegisterType((*ProjectList)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.ProjectList") + proto.RegisterType((*ProjectMember)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.ProjectMember") + proto.RegisterType((*ProjectSpec)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.ProjectSpec") + proto.RegisterType((*ProjectStatus)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.ProjectStatus") + proto.RegisterType((*ProjectTolerations)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.ProjectTolerations") + proto.RegisterType((*Provider)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.Provider") + proto.RegisterType((*Quota)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.Quota") + proto.RegisterType((*QuotaList)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.QuotaList") + proto.RegisterType((*QuotaSpec)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.QuotaSpec") + proto.RegisterMapType((k8s_io_api_core_v1.ResourceList)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.QuotaSpec.MetricsEntry") + proto.RegisterType((*Region)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.Region") + proto.RegisterMapType((map[string]string)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.Region.LabelsEntry") + proto.RegisterType((*ResourceData)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.ResourceData") + proto.RegisterType((*ResourceWatchCacheSize)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.ResourceWatchCacheSize") + proto.RegisterType((*SecretBinding)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.SecretBinding") + proto.RegisterType((*SecretBindingList)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.SecretBindingList") + proto.RegisterType((*Seed)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.Seed") + proto.RegisterType((*SeedBackup)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.SeedBackup") + proto.RegisterType((*SeedDNS)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.SeedDNS") + proto.RegisterType((*SeedDNSProvider)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.SeedDNSProvider") + proto.RegisterType((*SeedList)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.SeedList") + proto.RegisterType((*SeedNetworks)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.SeedNetworks") + proto.RegisterType((*SeedProvider)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.SeedProvider") + proto.RegisterType((*SeedSelector)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.SeedSelector") + proto.RegisterType((*SeedSettingExcessCapacityReservation)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.SeedSettingExcessCapacityReservation") + proto.RegisterType((*SeedSettingLoadBalancerServices)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.SeedSettingLoadBalancerServices") + proto.RegisterMapType((map[string]string)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.SeedSettingLoadBalancerServices.AnnotationsEntry") + proto.RegisterType((*SeedSettingScheduling)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.SeedSettingScheduling") + proto.RegisterType((*SeedSettingShootDNS)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.SeedSettingShootDNS") + proto.RegisterType((*SeedSettingVerticalPodAutoscaler)(nil), 
"github.com.gardener.gardener.pkg.apis.core.v1alpha1.SeedSettingVerticalPodAutoscaler") + proto.RegisterType((*SeedSettings)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.SeedSettings") + proto.RegisterType((*SeedSpec)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.SeedSpec") + proto.RegisterType((*SeedStatus)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.SeedStatus") + proto.RegisterType((*SeedTaint)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.SeedTaint") + proto.RegisterType((*SeedVolume)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.SeedVolume") + proto.RegisterType((*SeedVolumeProvider)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.SeedVolumeProvider") + proto.RegisterType((*ServiceAccountConfig)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.ServiceAccountConfig") + proto.RegisterType((*Shoot)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.Shoot") + proto.RegisterType((*ShootList)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.ShootList") + proto.RegisterType((*ShootMachineImage)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.ShootMachineImage") + proto.RegisterType((*ShootNetworks)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.ShootNetworks") + proto.RegisterType((*ShootSpec)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.ShootSpec") + proto.RegisterType((*ShootState)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.ShootState") + proto.RegisterType((*ShootStateList)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.ShootStateList") + proto.RegisterType((*ShootStateSpec)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.ShootStateSpec") + proto.RegisterType((*ShootStatus)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.ShootStatus") + proto.RegisterType((*Toleration)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.Toleration") + proto.RegisterType((*VerticalPodAutoscaler)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.VerticalPodAutoscaler") + proto.RegisterType((*Volume)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.Volume") + proto.RegisterType((*VolumeType)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.VolumeType") + proto.RegisterType((*WatchCacheSizes)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.WatchCacheSizes") + proto.RegisterType((*Worker)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.Worker") + proto.RegisterMapType((map[string]string)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.Worker.AnnotationsEntry") + proto.RegisterMapType((map[string]string)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.Worker.LabelsEntry") + proto.RegisterType((*WorkerKubernetes)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.WorkerKubernetes") + proto.RegisterType((*WorkerSystemComponents)(nil), "github.com.gardener.gardener.pkg.apis.core.v1alpha1.WorkerSystemComponents") +} + +func init() { + proto.RegisterFile("github.com/gardener/gardener/pkg/apis/core/v1alpha1/generated.proto", fileDescriptor_f1caaec5647a9dbf) +} + +var fileDescriptor_f1caaec5647a9dbf = []byte{ + // 9050 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x7d, 0x6d, 0x70, 0x24, 0xc7, + 0x75, 0x98, 0x66, 0xf1, 0xb5, 0x78, 0x0b, 0xe0, 0x80, 0xbe, 0x0f, 0x82, 0x47, 0xf2, 0x96, 0x1a, + 0x89, 0x0a, 0x19, 0xc9, 0x38, 0x53, 0x94, 0x2c, 0x91, 0x22, 0x29, 0x62, 0x77, 0x71, 0x77, 
0xab, + 0x3b, 0xdc, 0x2d, 0x7b, 0x0f, 0xa4, 0x3e, 0x52, 0xa6, 0x06, 0x33, 0x8d, 0xc5, 0x10, 0xb3, 0x33, + 0xcb, 0x99, 0x59, 0x1c, 0x70, 0x74, 0x22, 0x45, 0xb6, 0xe5, 0x24, 0x4e, 0x5c, 0x71, 0x59, 0x29, + 0xa5, 0x22, 0x95, 0x6d, 0xb9, 0x52, 0x29, 0xa7, 0xf2, 0x21, 0x97, 0x2b, 0x56, 0xca, 0x95, 0x94, + 0x7f, 0xd8, 0x3f, 0xac, 0xc8, 0x29, 0x47, 0x95, 0x72, 0x5c, 0x8a, 0xcb, 0x81, 0x22, 0xa4, 0x2a, + 0xdf, 0xc9, 0x9f, 0x54, 0xfe, 0x5c, 0x39, 0x29, 0x57, 0x7f, 0x4c, 0x4f, 0xcf, 0xec, 0x0c, 0xb0, + 0x98, 0xc1, 0x81, 0xe4, 0x2f, 0x60, 0xfb, 0x75, 0xbf, 0xd7, 0xd3, 0xfd, 0xfa, 0xf5, 0x7b, 0xaf, + 0x5f, 0xbf, 0x86, 0x66, 0xcf, 0x0e, 0xb7, 0x87, 0x9b, 0x2b, 0xa6, 0xd7, 0xbf, 0xda, 0x33, 0x7c, + 0x8b, 0xb8, 0xc4, 0x8f, 0xff, 0x19, 0xec, 0xf4, 0xae, 0x1a, 0x03, 0x3b, 0xb8, 0x6a, 0x7a, 0x3e, + 0xb9, 0xba, 0xfb, 0xac, 0xe1, 0x0c, 0xb6, 0x8d, 0x67, 0xaf, 0xf6, 0x28, 0xd0, 0x08, 0x89, 0xb5, + 0x32, 0xf0, 0xbd, 0xd0, 0x43, 0xcf, 0xc5, 0x48, 0x56, 0xa2, 0xb6, 0xf1, 0x3f, 0x83, 0x9d, 0xde, + 0x0a, 0x45, 0xb2, 0x42, 0x91, 0xac, 0x44, 0x48, 0x2e, 0x37, 0x4e, 0x44, 0x79, 0x93, 0x84, 0xa3, + 0x84, 0x2f, 0xff, 0x98, 0x8a, 0xc3, 0xeb, 0x79, 0x57, 0x59, 0xf1, 0xe6, 0x70, 0x8b, 0xfd, 0x62, + 0x3f, 0xd8, 0x7f, 0xa2, 0xfa, 0x33, 0x3b, 0x9f, 0x0c, 0x56, 0x6c, 0x8f, 0x22, 0xbe, 0x6a, 0x0c, + 0x43, 0x2f, 0x30, 0x0d, 0xc7, 0x76, 0x7b, 0x57, 0x77, 0x47, 0x31, 0xeb, 0x4a, 0x55, 0xd1, 0x85, + 0x23, 0xeb, 0xf8, 0x9b, 0x86, 0x99, 0x55, 0xe7, 0x63, 0x71, 0x9d, 0xbe, 0x61, 0x6e, 0xdb, 0x2e, + 0xf1, 0xf7, 0xa3, 0x8f, 0xbb, 0xea, 0x93, 0xc0, 0x1b, 0xfa, 0x26, 0x39, 0x51, 0xab, 0xe0, 0x6a, + 0x9f, 0x84, 0x46, 0x16, 0xad, 0xab, 0x79, 0xad, 0xfc, 0xa1, 0x1b, 0xda, 0xfd, 0x51, 0x32, 0x3f, + 0x71, 0x5c, 0x83, 0xc0, 0xdc, 0x26, 0x7d, 0x63, 0xa4, 0xdd, 0x73, 0x79, 0xed, 0x86, 0xa1, 0xed, + 0x5c, 0xb5, 0xdd, 0x30, 0x08, 0xfd, 0x74, 0x23, 0xfd, 0xa3, 0x30, 0xb5, 0x6a, 0x59, 0x9e, 0x8b, + 0x9e, 0x81, 0x19, 0xe2, 0x1a, 0x9b, 0x0e, 0xb1, 0x96, 0xb5, 0x27, 0xb5, 0xa7, 0xab, 0x8d, 0x73, + 0xdf, 0x3d, 0xa8, 0xbf, 0xef, 0xf0, 0xa0, 0x3e, 0xb3, 0xc6, 0x8b, 0x71, 0x04, 0xd7, 0xbf, 0x5e, + 0x81, 0x69, 0xd6, 0x28, 0x40, 0xbf, 0xa4, 0xc1, 0xf9, 0x9d, 0xe1, 0x26, 0xf1, 0x5d, 0x12, 0x92, + 0xa0, 0x65, 0x04, 0xdb, 0x9b, 0x9e, 0xe1, 0x73, 0x14, 0xb5, 0x8f, 0xde, 0x58, 0x29, 0xc0, 0x82, + 0x2b, 0x37, 0x47, 0xf1, 0x35, 0x1e, 0x39, 0x3c, 0xa8, 0x9f, 0xcf, 0x00, 0xe0, 0x2c, 0xea, 0xe8, + 0x1e, 0xcc, 0xb9, 0x3d, 0xdb, 0xdd, 0x6b, 0xbb, 0x3d, 0x9f, 0x04, 0xc1, 0x72, 0x85, 0xf5, 0x66, + 0xb5, 0x50, 0x6f, 0x6e, 0x2b, 0x88, 0x1a, 0x8b, 0x87, 0x07, 0xf5, 0x39, 0xb5, 0x04, 0x27, 0x08, + 0xe9, 0x5f, 0xd5, 0xe0, 0xdc, 0xaa, 0xd5, 0xb7, 0x83, 0xc0, 0xf6, 0xdc, 0x8e, 0x33, 0xec, 0xd9, + 0x2e, 0x7a, 0x12, 0x26, 0x5d, 0xa3, 0x4f, 0xd8, 0x90, 0xcc, 0x36, 0xe6, 0xc4, 0xa8, 0x4e, 0xde, + 0x36, 0xfa, 0x04, 0x33, 0x08, 0x7a, 0x15, 0xa6, 0x4d, 0xcf, 0xdd, 0xb2, 0x7b, 0xa2, 0xa3, 0x3f, + 0xb6, 0xc2, 0x67, 0x72, 0x45, 0x9d, 0x49, 0xd6, 0x3f, 0xc1, 0x01, 0x2b, 0xd8, 0xb8, 0xb7, 0xb6, + 0x17, 0x12, 0x97, 0x92, 0x69, 0xc0, 0xe1, 0x41, 0x7d, 0xba, 0xc9, 0x10, 0x60, 0x81, 0x48, 0xbf, + 0x06, 0xd5, 0x55, 0x87, 0xf8, 0xa1, 0xed, 0xf6, 0xd0, 0x0b, 0xb0, 0x40, 0xfa, 0x86, 0xed, 0x60, + 0x62, 0x12, 0x7b, 0x97, 0xf8, 0xc1, 0xb2, 0xf6, 0xe4, 0xc4, 0xd3, 0xb3, 0x0d, 0x74, 0x78, 0x50, + 0x5f, 0x58, 0x4b, 0x40, 0x70, 0xaa, 0xa6, 0xfe, 0x15, 0x0d, 0x6a, 0xab, 0x43, 0xcb, 0x0e, 0x39, + 0x7e, 0x14, 0x40, 0xcd, 0xa0, 0x3f, 0x3b, 0x9e, 0x63, 0x9b, 0xfb, 0x62, 0x9a, 0x5f, 0x29, 0x34, + 0xb0, 0xab, 0x31, 0x9e, 0xc6, 0xb9, 0xc3, 0x83, 0x7a, 0x4d, 0x29, 0xc0, 0x2a, 0x15, 0x7d, 0x1b, + 0x54, 0x18, 0xfa, 
0x1c, 0xcc, 0xf1, 0xaf, 0x5c, 0x37, 0x06, 0x98, 0x6c, 0x89, 0x4e, 0x7c, 0x40, + 0x19, 0xb4, 0x88, 0xd2, 0xca, 0x9d, 0xcd, 0x37, 0x89, 0x19, 0x62, 0xb2, 0x45, 0x7c, 0xe2, 0x9a, + 0x84, 0xcf, 0x5f, 0x53, 0x69, 0x8c, 0x13, 0xa8, 0xf4, 0x1f, 0x6a, 0xb0, 0xb8, 0xba, 0x6b, 0xd8, + 0x8e, 0xb1, 0x69, 0x3b, 0x76, 0xb8, 0xff, 0x79, 0xcf, 0x25, 0x63, 0x4c, 0xe0, 0x06, 0x3c, 0x32, + 0x74, 0x0d, 0xde, 0xce, 0x21, 0xeb, 0x7c, 0xca, 0xee, 0xee, 0x0f, 0x08, 0x65, 0x3d, 0x3a, 0xd4, + 0x8f, 0x1d, 0x1e, 0xd4, 0x1f, 0xd9, 0xc8, 0xae, 0x82, 0xf3, 0xda, 0x22, 0x0c, 0x97, 0x14, 0xd0, + 0x6b, 0x9e, 0x33, 0xec, 0x0b, 0xac, 0x13, 0x0c, 0xeb, 0xe5, 0xc3, 0x83, 0xfa, 0xa5, 0x8d, 0xcc, + 0x1a, 0x38, 0xa7, 0xa5, 0xfe, 0xbd, 0x0a, 0xcc, 0x35, 0x0c, 0x73, 0x67, 0x38, 0x68, 0x0c, 0xcd, + 0x1d, 0x12, 0xa2, 0x2f, 0x42, 0x95, 0x4a, 0x2e, 0xcb, 0x08, 0x0d, 0x31, 0x92, 0x3f, 0x9e, 0xcb, + 0x7e, 0x6c, 0x16, 0x69, 0xed, 0x78, 0x6c, 0xd7, 0x49, 0x68, 0x34, 0x90, 0x18, 0x13, 0x88, 0xcb, + 0xb0, 0xc4, 0x8a, 0x7a, 0x30, 0x19, 0x0c, 0x88, 0x29, 0x98, 0x7b, 0xad, 0x10, 0xb3, 0xa8, 0x5d, + 0xee, 0x0e, 0x88, 0x19, 0x4f, 0x03, 0xfd, 0x85, 0x19, 0x01, 0xe4, 0xc1, 0x74, 0x10, 0x1a, 0xe1, + 0x90, 0x8e, 0x0f, 0x25, 0x75, 0xbd, 0x3c, 0x29, 0x86, 0xae, 0xb1, 0x20, 0x88, 0x4d, 0xf3, 0xdf, + 0x58, 0x90, 0xd1, 0x7f, 0xa0, 0xc1, 0xa2, 0x5a, 0xfd, 0x96, 0x1d, 0x84, 0xe8, 0x2f, 0x8d, 0x0c, + 0xe8, 0xca, 0x78, 0x03, 0x4a, 0x5b, 0xb3, 0xe1, 0x5c, 0x14, 0xe4, 0xaa, 0x51, 0x89, 0x32, 0x98, + 0x5b, 0x30, 0x65, 0x87, 0xa4, 0xcf, 0x19, 0xab, 0xa8, 0x4c, 0x53, 0xfb, 0xdc, 0x98, 0x17, 0xd4, + 0xa6, 0xda, 0x14, 0x2f, 0xe6, 0xe8, 0xf5, 0x2f, 0xc2, 0x05, 0xb5, 0x56, 0xc7, 0xf7, 0x76, 0x6d, + 0x8b, 0xf8, 0x74, 0x31, 0x84, 0xfb, 0x83, 0x91, 0xc5, 0x40, 0x99, 0x0b, 0x33, 0x08, 0xfa, 0x10, + 0x4c, 0xfb, 0xa4, 0x67, 0x7b, 0x2e, 0x9b, 0xf0, 0xd9, 0x78, 0xf0, 0x30, 0x2b, 0xc5, 0x02, 0xaa, + 0xff, 0xf7, 0x4a, 0x72, 0xf0, 0xe8, 0x44, 0xa2, 0x7b, 0x50, 0x1d, 0x08, 0x52, 0x62, 0xf0, 0xda, + 0xa5, 0xbf, 0x30, 0xea, 0x7b, 0x3c, 0xae, 0x51, 0x09, 0x96, 0xc4, 0x90, 0x0d, 0x0b, 0xd1, 0xff, + 0xcd, 0x12, 0xb2, 0x98, 0xc9, 0xd4, 0x4e, 0x02, 0x11, 0x4e, 0x21, 0x46, 0x77, 0x61, 0x36, 0x20, + 0xa6, 0x4f, 0xa8, 0x5c, 0x12, 0x9c, 0x9a, 0x29, 0xbc, 0xba, 0x51, 0x25, 0x21, 0xbc, 0x96, 0x44, + 0xf7, 0x67, 0x25, 0x00, 0xc7, 0x88, 0xd0, 0xe3, 0x30, 0x19, 0x10, 0x62, 0x2d, 0x4f, 0xb2, 0x41, + 0xaf, 0xb2, 0xa5, 0x41, 0x88, 0x85, 0x59, 0xa9, 0xfe, 0x6b, 0x93, 0x80, 0x46, 0x19, 0x5b, 0xfd, + 0x6a, 0x5e, 0x22, 0x06, 0xbd, 0xcc, 0x57, 0x8b, 0x35, 0x92, 0x42, 0x8c, 0xde, 0x86, 0x79, 0xc7, + 0x08, 0xc2, 0x3b, 0x03, 0xaa, 0x7e, 0x44, 0xdc, 0x51, 0xfb, 0x68, 0xa3, 0xd0, 0xf4, 0xde, 0x52, + 0x31, 0x35, 0x96, 0x0e, 0x0f, 0xea, 0xf3, 0x89, 0x22, 0x9c, 0xa4, 0x85, 0x76, 0x60, 0x96, 0x16, + 0xac, 0xf9, 0xbe, 0xe7, 0x8b, 0x21, 0x7f, 0xb9, 0x30, 0x61, 0x86, 0xa5, 0x31, 0x4f, 0x67, 0x42, + 0xfe, 0xc4, 0x31, 0x7e, 0xf4, 0x19, 0x40, 0xde, 0x66, 0x40, 0xfc, 0x5d, 0x62, 0x5d, 0xe7, 0xda, + 0x16, 0xfd, 0x5c, 0x3a, 0x2f, 0x13, 0x8d, 0xcb, 0x62, 0x0e, 0xd1, 0x9d, 0x91, 0x1a, 0x38, 0xa3, + 0x15, 0xda, 0x01, 0x24, 0x35, 0x36, 0x39, 0xed, 0xcb, 0x53, 0xe3, 0x33, 0xcd, 0x25, 0x4a, 0xec, + 0xfa, 0x08, 0x0a, 0x9c, 0x81, 0x56, 0xff, 0xfd, 0x0a, 0xd4, 0x38, 0x93, 0xac, 0xb9, 0xa1, 0xbf, + 0x7f, 0x06, 0x5b, 0xc3, 0x56, 0x62, 0x6b, 0x68, 0x95, 0x58, 0xea, 0xac, 0xc7, 0xb9, 0x3b, 0x83, + 0x9b, 0xda, 0x19, 0xae, 0x95, 0xa6, 0x74, 0xf4, 0xc6, 0xf0, 0xc7, 0x1a, 0x9c, 0x53, 0x6a, 0x9f, + 0xc1, 0xbe, 0x40, 0x92, 0xfb, 0xc2, 0x2b, 0x65, 0x3f, 0x30, 0x67, 0x5b, 0x30, 0x13, 0xdf, 0xc5, + 0x44, 0xf6, 0x47, 0x01, 0x36, 0x99, 0x4c, 
0xb9, 0x1d, 0x2b, 0x49, 0x72, 0xd6, 0x1b, 0x12, 0x82, + 0x95, 0x5a, 0x52, 0x58, 0x55, 0x32, 0x85, 0xd5, 0xbf, 0xaa, 0xc0, 0xd2, 0xc8, 0x58, 0x8f, 0x0a, + 0x10, 0xed, 0x9d, 0x12, 0x20, 0x95, 0x77, 0x44, 0x80, 0x4c, 0x14, 0x11, 0x20, 0xfa, 0xef, 0x69, + 0x30, 0xd1, 0xc4, 0x6d, 0xf4, 0xe1, 0x84, 0x12, 0xfb, 0x88, 0xaa, 0xc4, 0x3e, 0x38, 0xa8, 0xcf, + 0x34, 0x71, 0x5b, 0xd1, 0x67, 0x7f, 0x41, 0x83, 0x25, 0xd3, 0x73, 0x43, 0x83, 0x72, 0x24, 0xe6, + 0x12, 0x3f, 0xe2, 0xac, 0x62, 0xfa, 0x5b, 0x33, 0x85, 0xad, 0xf1, 0xa8, 0xe8, 0xc1, 0x52, 0x1a, + 0x12, 0xe0, 0x51, 0xd2, 0xfa, 0x06, 0xcc, 0x36, 0x1d, 0x6f, 0x68, 0xb5, 0xdd, 0x2d, 0xef, 0x14, + 0x55, 0x90, 0x7f, 0xaf, 0xc1, 0x1c, 0xc3, 0xdb, 0xf1, 0xbd, 0x2d, 0xdb, 0x21, 0xef, 0x11, 0x65, + 0x58, 0xed, 0x72, 0x9e, 0xc8, 0x63, 0xba, 0xa9, 0x5a, 0xf1, 0xbd, 0xa2, 0x9b, 0xaa, 0x7d, 0xce, + 0x11, 0x42, 0xdf, 0x98, 0x49, 0x7e, 0x1a, 0x13, 0x43, 0x4f, 0x43, 0xd5, 0x34, 0x1a, 0x43, 0xd7, + 0x72, 0x24, 0x67, 0xd0, 0x6e, 0x36, 0x57, 0x79, 0x19, 0x96, 0x50, 0xf4, 0x36, 0x40, 0xec, 0x34, + 0x10, 0x13, 0x71, 0xbd, 0xa4, 0xa7, 0xa2, 0x4b, 0x42, 0x6a, 0x6b, 0x07, 0xf1, 0xec, 0xc7, 0x30, + 0xac, 0x90, 0x43, 0x7f, 0x05, 0xe6, 0xc5, 0x30, 0xb7, 0xfb, 0x46, 0x4f, 0x98, 0x72, 0x45, 0xc7, + 0x6a, 0x5d, 0xc1, 0xd4, 0xb8, 0x28, 0x28, 0xcf, 0xab, 0xa5, 0x01, 0x4e, 0x92, 0x43, 0xf7, 0x61, + 0xae, 0xaf, 0xda, 0xa7, 0x93, 0x25, 0xb6, 0x0b, 0xc5, 0x58, 0x6d, 0x5c, 0x10, 0xd4, 0xe7, 0x12, + 0xa6, 0x6d, 0x82, 0x56, 0x86, 0x8e, 0x3d, 0xf5, 0xb0, 0x74, 0xec, 0x2d, 0x98, 0xe1, 0x6b, 0x3c, + 0x58, 0x9e, 0x66, 0x5f, 0xf8, 0xa9, 0x42, 0x5f, 0xc8, 0xe5, 0x45, 0xec, 0x0a, 0xe3, 0xbf, 0x03, + 0x1c, 0x21, 0x47, 0xf7, 0x60, 0x8e, 0x6e, 0x59, 0x5d, 0xe2, 0x10, 0x33, 0xf4, 0xfc, 0xe5, 0x99, + 0x12, 0x9e, 0xa6, 0xae, 0x82, 0x88, 0x7b, 0x2a, 0xd4, 0x12, 0x9c, 0x20, 0x24, 0x85, 0x60, 0x35, + 0x57, 0x08, 0xee, 0x42, 0x6d, 0x57, 0x71, 0x19, 0xcc, 0xb2, 0x61, 0xf8, 0x74, 0xa1, 0x9e, 0xc5, + 0x0e, 0x84, 0xc6, 0x79, 0x41, 0xa9, 0xa6, 0x3a, 0x1b, 0x54, 0x42, 0xfa, 0xcf, 0x4d, 0xc3, 0x52, + 0xd3, 0x19, 0x06, 0x21, 0xf1, 0x57, 0x85, 0x37, 0x97, 0xf8, 0xe8, 0x2b, 0x1a, 0x5c, 0x62, 0xff, + 0xb6, 0xbc, 0x7b, 0x6e, 0x8b, 0x38, 0xc6, 0xfe, 0xea, 0x16, 0xad, 0x61, 0x59, 0x27, 0x13, 0x44, + 0xad, 0xa1, 0xd8, 0xb3, 0x99, 0xf3, 0xa3, 0x9b, 0x89, 0x11, 0xe7, 0x50, 0x42, 0x3f, 0xaf, 0xc1, + 0xa3, 0x19, 0xa0, 0x16, 0x71, 0x48, 0x48, 0x84, 0x24, 0x38, 0x69, 0x3f, 0x9e, 0x38, 0x3c, 0xa8, + 0x3f, 0xda, 0xcd, 0x43, 0x8a, 0xf3, 0xe9, 0xd1, 0x5d, 0xf6, 0x72, 0x06, 0xf4, 0x9a, 0x61, 0x3b, + 0x43, 0x9f, 0x08, 0x4d, 0xf5, 0xa4, 0xdd, 0xb9, 0x72, 0x78, 0x50, 0xbf, 0xdc, 0xcd, 0xc5, 0x8a, + 0x8f, 0xa0, 0x88, 0xbe, 0x04, 0x17, 0x25, 0x74, 0xc3, 0x75, 0x09, 0xb1, 0x88, 0x75, 0xd7, 0xee, + 0x13, 0x66, 0xbb, 0x9c, 0xbc, 0x2b, 0x8f, 0x1e, 0x1e, 0xd4, 0x2f, 0x76, 0xb3, 0x10, 0xe2, 0x6c, + 0x3a, 0xa8, 0x07, 0x4f, 0xc4, 0x80, 0xd0, 0x76, 0xec, 0xfb, 0x0c, 0xd3, 0xdd, 0x6d, 0x9f, 0x04, + 0xdb, 0x9e, 0x63, 0x31, 0x79, 0xa1, 0x35, 0xde, 0x7f, 0x78, 0x50, 0x7f, 0xa2, 0x7b, 0x54, 0x45, + 0x7c, 0x34, 0x1e, 0x64, 0xc1, 0x5c, 0x60, 0x1a, 0x6e, 0xdb, 0x0d, 0x89, 0xbf, 0x6b, 0x38, 0xcb, + 0xd3, 0x85, 0x3e, 0x90, 0xaf, 0x51, 0x05, 0x0f, 0x4e, 0x60, 0xd5, 0xff, 0x87, 0x06, 0x35, 0xb1, + 0x12, 0x98, 0xe2, 0x62, 0xc2, 0x94, 0x49, 0xb7, 0x2d, 0xc1, 0xf1, 0x2f, 0x17, 0xdf, 0x1f, 0x29, + 0xba, 0x78, 0x73, 0x64, 0x45, 0x98, 0xe3, 0x46, 0xf7, 0x32, 0x76, 0xb7, 0x66, 0xc9, 0xdd, 0x8d, + 0x91, 0x3b, 0x66, 0x67, 0xd3, 0x0f, 0x26, 0x60, 0xb6, 0xe9, 0xb9, 0x96, 0xcd, 0x14, 0xe6, 0x67, + 0x13, 0x4a, 0xda, 0x13, 0xaa, 0x7c, 0x7a, 0x70, 0x50, 0x9f, 0x97, 
0x15, 0x15, 0x81, 0xf5, 0xbc, + 0x34, 0xd2, 0xb8, 0xd6, 0xf6, 0xfe, 0xa4, 0x71, 0xf5, 0xe0, 0xa0, 0x7e, 0x4e, 0x36, 0x4b, 0xda, + 0x5b, 0x68, 0x17, 0x10, 0x55, 0x9f, 0xef, 0xfa, 0x86, 0x1b, 0x70, 0xb4, 0x94, 0x6d, 0xf9, 0x0a, + 0xfa, 0x8b, 0xe3, 0xcd, 0x2a, 0x6d, 0x11, 0x6b, 0xd7, 0xb7, 0x46, 0xb0, 0xe1, 0x0c, 0x0a, 0xe8, + 0x4d, 0x58, 0xa0, 0xa5, 0x1b, 0x03, 0xcb, 0x08, 0x89, 0xb2, 0x54, 0x4e, 0x42, 0xf3, 0x92, 0xa0, + 0xb9, 0x70, 0x2b, 0x81, 0x09, 0xa7, 0x30, 0x73, 0xa5, 0xd6, 0x08, 0x3c, 0x97, 0xad, 0x82, 0x84, + 0x52, 0x4b, 0x4b, 0xb1, 0x80, 0xa2, 0x67, 0x60, 0xa6, 0x4f, 0x82, 0xc0, 0xe8, 0x11, 0xc6, 0xd6, + 0xb3, 0xf1, 0xee, 0xb5, 0xce, 0x8b, 0x71, 0x04, 0x47, 0x1f, 0x81, 0x29, 0xd3, 0xb3, 0x48, 0xb0, + 0x3c, 0xc3, 0xfc, 0xc9, 0x97, 0x18, 0x33, 0xd1, 0x82, 0x07, 0x07, 0xf5, 0x59, 0x66, 0x8b, 0xd0, + 0x5f, 0x98, 0x57, 0xd2, 0x7f, 0x85, 0x6a, 0x94, 0x29, 0xd5, 0x3c, 0xcf, 0x39, 0xce, 0xa7, 0x95, + 0x19, 0x13, 0x67, 0xe7, 0x59, 0xd3, 0xbf, 0x5f, 0x81, 0x0b, 0xb4, 0x87, 0xbe, 0xe7, 0x38, 0x54, + 0xcc, 0x0e, 0x1c, 0x6f, 0xbf, 0x4f, 0xdc, 0x70, 0x0c, 0x93, 0xe1, 0x0c, 0xfd, 0x7f, 0xaf, 0xc0, + 0xf4, 0x80, 0x1f, 0x9f, 0x4c, 0xb0, 0xee, 0x3c, 0x4d, 0x27, 0x91, 0x9f, 0x6d, 0x3c, 0x38, 0xa8, + 0x5f, 0xce, 0xfa, 0x00, 0x71, 0x2a, 0x22, 0xda, 0x21, 0x3b, 0xa5, 0x75, 0x70, 0xa6, 0x7b, 0x6e, + 0x4c, 0x55, 0xde, 0xd8, 0x24, 0xce, 0xb8, 0x7a, 0x86, 0xfe, 0x5f, 0x2b, 0x70, 0x29, 0xee, 0x51, + 0xdb, 0x0d, 0x42, 0xc3, 0x71, 0xb8, 0x4d, 0xfc, 0xf0, 0x8d, 0xa5, 0xb7, 0x12, 0xc6, 0xd2, 0x9d, + 0xc2, 0x96, 0xe7, 0x68, 0xe7, 0x73, 0x3d, 0x45, 0xfb, 0x29, 0x4f, 0xd1, 0xab, 0xa7, 0x49, 0xf4, + 0x68, 0xa7, 0xd1, 0xff, 0xd4, 0xe0, 0x72, 0x76, 0xc3, 0x33, 0xb0, 0xdd, 0x06, 0x49, 0xdb, 0xed, + 0xe6, 0x29, 0x7e, 0x76, 0x8e, 0x15, 0xf7, 0x47, 0xb9, 0x9f, 0xcb, 0xec, 0xb9, 0x2d, 0x38, 0x47, + 0x95, 0xec, 0x20, 0x14, 0xce, 0x8d, 0x93, 0x1d, 0xf4, 0x45, 0x0e, 0x8e, 0x73, 0x38, 0x89, 0x03, + 0xa7, 0x91, 0xa2, 0xdb, 0x30, 0x43, 0x19, 0x9e, 0xe2, 0xaf, 0x8c, 0x8f, 0x5f, 0xca, 0xd4, 0x2e, + 0x6f, 0x8b, 0x23, 0x24, 0xfa, 0xff, 0xd3, 0xe0, 0xf1, 0xa3, 0xa6, 0x1f, 0xf9, 0x00, 0x66, 0xb4, + 0x8d, 0xf1, 0xa3, 0xd8, 0xc2, 0xaa, 0x40, 0x84, 0x26, 0x5e, 0x46, 0xb2, 0x28, 0xc0, 0x0a, 0x95, + 0x0c, 0x3f, 0x7f, 0xe5, 0x21, 0xf9, 0xf9, 0xf5, 0xff, 0xa5, 0xa9, 0x02, 0x43, 0x1d, 0xfe, 0xf7, + 0x9c, 0xc0, 0x50, 0x3b, 0x9f, 0xeb, 0x67, 0x49, 0xae, 0x5a, 0xb5, 0xc9, 0x7b, 0x6f, 0xd5, 0xaa, + 0xbd, 0xcf, 0x59, 0xb5, 0xbf, 0x58, 0xc9, 0xfb, 0x5c, 0xb6, 0x6a, 0xf7, 0x60, 0x36, 0x0a, 0x9f, + 0x89, 0x78, 0xfb, 0x7a, 0xe9, 0x4e, 0x71, 0x7c, 0xf1, 0xf9, 0x57, 0x54, 0x12, 0xe0, 0x98, 0x18, + 0xda, 0x07, 0xb0, 0xe4, 0x7e, 0x29, 0x18, 0xa0, 0x5d, 0x92, 0x74, 0xbc, 0x01, 0x37, 0x16, 0x28, + 0xcf, 0xc5, 0xbf, 0xb1, 0x42, 0x4c, 0xff, 0x8d, 0x0a, 0xa0, 0xd1, 0xfe, 0x52, 0xa5, 0x63, 0xc7, + 0x76, 0xad, 0xb4, 0xd2, 0x71, 0xd3, 0x76, 0x2d, 0xcc, 0x20, 0x52, 0x2d, 0xa9, 0xe4, 0xaa, 0x25, + 0x2f, 0xc1, 0xb9, 0x9e, 0xe3, 0x6d, 0x1a, 0x8e, 0xb3, 0x2f, 0xc2, 0x70, 0xd8, 0xbe, 0x54, 0x6d, + 0x9c, 0xa7, 0xc2, 0xed, 0x7a, 0x12, 0x84, 0xd3, 0x75, 0xd1, 0x00, 0x16, 0x7d, 0x62, 0x7a, 0xae, + 0x69, 0x3b, 0x4c, 0x89, 0xf4, 0x86, 0x61, 0x41, 0x63, 0xee, 0xc2, 0xe1, 0x41, 0x7d, 0x11, 0xa7, + 0x70, 0xe1, 0x11, 0xec, 0xe8, 0x29, 0x98, 0x19, 0xf8, 0x76, 0xdf, 0xf0, 0xf7, 0x99, 0x9a, 0x5a, + 0x6d, 0xd4, 0xa8, 0x94, 0xec, 0xf0, 0x22, 0x1c, 0xc1, 0xf4, 0x6f, 0x6a, 0x30, 0xd1, 0xba, 0xdd, + 0x45, 0x3a, 0x4c, 0x5b, 0x5e, 0xdf, 0xb0, 0x5d, 0x31, 0x4a, 0x2c, 0x96, 0xa5, 0xc5, 0x4a, 0xb0, + 0x80, 0xa0, 0xb7, 0x60, 0x36, 0x92, 0x31, 0xe5, 0x8e, 0x37, 0x5a, 0xb7, 0xbb, 0xf2, 0x2c, 
0x58, + 0x32, 0x53, 0x54, 0x12, 0xe0, 0x98, 0x8a, 0x6e, 0xc0, 0x52, 0xeb, 0x76, 0xb7, 0xed, 0x9a, 0xce, + 0xd0, 0x22, 0x6b, 0x7b, 0xec, 0x0f, 0xfd, 0x34, 0x9b, 0x97, 0x88, 0x00, 0x1a, 0xf6, 0x69, 0xa2, + 0x12, 0x8e, 0x60, 0xb4, 0x1a, 0xe1, 0x2d, 0x44, 0xf0, 0x07, 0xab, 0x26, 0x90, 0xe0, 0x08, 0xa6, + 0xff, 0x49, 0x05, 0x6a, 0x4a, 0x87, 0x50, 0x1f, 0x66, 0xf8, 0xf7, 0x46, 0x67, 0xb0, 0xd7, 0x8a, + 0x7e, 0x63, 0xb2, 0xdb, 0x9c, 0x3c, 0x1f, 0xd2, 0x00, 0x47, 0x34, 0xd4, 0x79, 0xaa, 0xe4, 0xcf, + 0x13, 0x5a, 0x01, 0xe0, 0x47, 0xcc, 0xec, 0x70, 0x87, 0xeb, 0xab, 0x6c, 0x29, 0x74, 0x65, 0x29, + 0x56, 0x6a, 0xa0, 0xc7, 0x05, 0x47, 0x2b, 0xa7, 0xd0, 0x0a, 0x37, 0xf7, 0x60, 0xea, 0xbe, 0xe7, + 0x92, 0x40, 0xf8, 0xfd, 0x4e, 0xeb, 0x0b, 0x67, 0xa9, 0x94, 0xfa, 0x3c, 0x45, 0x8c, 0x39, 0x7e, + 0xfd, 0x5b, 0x1a, 0x40, 0xcb, 0x08, 0x0d, 0xee, 0xa4, 0x1a, 0x23, 0x82, 0xe7, 0xf1, 0xc4, 0x4a, + 0xac, 0x8e, 0x9c, 0x27, 0x4c, 0x06, 0xf6, 0xfd, 0xe8, 0xfb, 0xe5, 0x16, 0xc4, 0xb1, 0x77, 0xed, + 0xfb, 0x04, 0x33, 0x38, 0xfa, 0x30, 0xcc, 0x12, 0xd7, 0xf4, 0xf7, 0x07, 0xa1, 0x38, 0x88, 0xaf, + 0xf2, 0x53, 0x9e, 0xb5, 0xa8, 0x10, 0xc7, 0x70, 0x7d, 0x17, 0xaa, 0x6b, 0xae, 0x35, 0xf0, 0x6c, + 0x6e, 0x9f, 0x1c, 0xd3, 0xc1, 0x27, 0x60, 0x62, 0xe8, 0x3b, 0xa2, 0x7f, 0x35, 0x51, 0x61, 0x62, + 0x03, 0xdf, 0xc2, 0xb4, 0x9c, 0x1a, 0x7d, 0x83, 0xa1, 0x3f, 0xf0, 0x82, 0xa8, 0x93, 0x52, 0x41, + 0xe9, 0xf0, 0x62, 0x1c, 0xc1, 0xf5, 0x07, 0x1a, 0x2c, 0xae, 0xed, 0x0d, 0x6c, 0x9f, 0x85, 0x06, + 0x11, 0x9f, 0xee, 0xec, 0xb4, 0xfd, 0x2e, 0xff, 0x57, 0xf4, 0x41, 0xb6, 0x17, 0x35, 0x70, 0x04, + 0x47, 0x5b, 0xb0, 0x40, 0x58, 0x73, 0x2a, 0x17, 0x5a, 0x86, 0x74, 0x9c, 0x9d, 0xc4, 0xe6, 0xe5, + 0xa1, 0x67, 0x09, 0x2c, 0x38, 0x85, 0x15, 0x75, 0x61, 0xc1, 0x74, 0x8c, 0x20, 0xb0, 0xb7, 0x6c, + 0x33, 0x3e, 0x01, 0x9b, 0x6d, 0x7c, 0x98, 0xb6, 0x6d, 0x26, 0x20, 0x0f, 0x0e, 0xea, 0x17, 0x45, + 0x3f, 0x93, 0x00, 0x9c, 0x42, 0xa1, 0xff, 0xae, 0x06, 0xb3, 0x52, 0x9f, 0x79, 0x77, 0x99, 0x85, + 0x4f, 0x43, 0xd5, 0xb2, 0x03, 0x55, 0xc6, 0xb3, 0x03, 0x8c, 0x96, 0x28, 0xc3, 0x12, 0xaa, 0xff, + 0xeb, 0x0a, 0x5c, 0x92, 0xb8, 0xa3, 0xed, 0x86, 0xaa, 0x5f, 0xe3, 0xec, 0x39, 0x8f, 0x0b, 0x56, + 0x53, 0x38, 0x5d, 0x61, 0xb3, 0xa7, 0xd2, 0x7c, 0x54, 0xcb, 0xe2, 0x21, 0x74, 0x1b, 0xa6, 0xa8, + 0xd1, 0x12, 0xb9, 0x3b, 0x4e, 0x38, 0x1a, 0x6c, 0xbd, 0xb2, 0xfe, 0x62, 0x8e, 0x06, 0xbd, 0xad, + 0xaa, 0x0d, 0x53, 0x4c, 0xc4, 0x7f, 0xe6, 0x64, 0xc2, 0x81, 0x45, 0x22, 0xaf, 0xd0, 0x2f, 0xb1, + 0xa2, 0x11, 0xc9, 0x88, 0x9c, 0xc9, 0xd2, 0x1c, 0xf4, 0x00, 0xaa, 0xd7, 0x05, 0x5a, 0x74, 0x19, + 0x2a, 0x76, 0x34, 0x7a, 0x20, 0x5a, 0x55, 0xda, 0x2d, 0x5c, 0xb1, 0x2d, 0xb9, 0x48, 0x2b, 0xb9, + 0x8b, 0x54, 0x59, 0x45, 0x13, 0x47, 0xaf, 0x22, 0xfd, 0x1f, 0x69, 0x70, 0x21, 0xa2, 0x1a, 0xf5, + 0x8a, 0x4a, 0xac, 0x31, 0x44, 0xc1, 0xf1, 0x5a, 0xc3, 0x1d, 0x98, 0x64, 0x0a, 0xe7, 0x44, 0x91, + 0xd9, 0x91, 0x08, 0x69, 0x77, 0x30, 0x43, 0xa4, 0x7f, 0x5b, 0x83, 0xda, 0x0d, 0x7b, 0x93, 0xf8, + 0x2e, 0xd7, 0xe4, 0x9f, 0x4a, 0x07, 0x0b, 0xd7, 0xb2, 0x02, 0x85, 0xd1, 0x3e, 0xcc, 0x06, 0xe6, + 0x36, 0xb1, 0x86, 0x8e, 0x3c, 0x3e, 0x2e, 0x16, 0x12, 0xac, 0xd0, 0xee, 0x0a, 0x84, 0x4a, 0x38, + 0x54, 0x44, 0x02, 0xc7, 0xd4, 0xf4, 0xb7, 0xe1, 0x7c, 0x46, 0x23, 0x54, 0x67, 0x8c, 0xeb, 0x87, + 0x62, 0x78, 0x23, 0x4e, 0xf4, 0x43, 0xcc, 0xcb, 0xd1, 0xa3, 0x30, 0x41, 0xdc, 0x28, 0x30, 0x61, + 0x86, 0xca, 0xd8, 0x35, 0xd7, 0xc2, 0xb4, 0x8c, 0x2e, 0x50, 0xc7, 0x4b, 0x88, 0x22, 0xb6, 0x40, + 0x6f, 0x89, 0x32, 0x2c, 0xa1, 0xfa, 0xdf, 0x9c, 0x86, 0x27, 0x6e, 0x78, 0xbe, 0x7d, 0xdf, 0x73, + 0x43, 0xc3, 0xe9, 
0x78, 0x56, 0x7c, 0x10, 0x22, 0x16, 0xfb, 0xcf, 0x6a, 0xf0, 0x88, 0x39, 0x18, + 0xb6, 0x5d, 0x3b, 0xb4, 0x8d, 0xc8, 0x3f, 0xdd, 0x21, 0xbe, 0xed, 0x15, 0x3d, 0x0f, 0x61, 0x21, + 0xa6, 0xcd, 0xce, 0x46, 0x16, 0x4a, 0x9c, 0x47, 0x0b, 0xbd, 0x09, 0x0b, 0x96, 0x77, 0xcf, 0xe5, + 0xde, 0x72, 0xe2, 0x18, 0xfb, 0x05, 0x4f, 0x41, 0x98, 0x80, 0x6b, 0x25, 0x30, 0xe1, 0x14, 0x66, + 0x76, 0x04, 0x24, 0x8b, 0xba, 0x21, 0x0b, 0xb1, 0xbd, 0x1f, 0x0f, 0x67, 0xc1, 0x23, 0xa0, 0x56, + 0x26, 0x46, 0x9c, 0x43, 0x09, 0x7d, 0x09, 0x2e, 0xda, 0x7c, 0x20, 0x30, 0x31, 0x2c, 0xdb, 0x25, + 0x41, 0xc0, 0xbf, 0xbb, 0xc4, 0x19, 0x47, 0x3b, 0x0b, 0x21, 0xce, 0xa6, 0x83, 0x7e, 0x12, 0x20, + 0xd8, 0x77, 0x4d, 0x31, 0xd7, 0x53, 0x85, 0xa8, 0x72, 0x0d, 0x4c, 0x62, 0xc1, 0x0a, 0x46, 0xaa, + 0x83, 0x84, 0x9e, 0x43, 0x7c, 0xc3, 0x35, 0xb9, 0x03, 0x58, 0xe3, 0x3a, 0xc8, 0xdd, 0xa8, 0x10, + 0xc7, 0x70, 0x64, 0xc1, 0xdc, 0x70, 0xa0, 0x4c, 0xfe, 0x4c, 0xf1, 0x73, 0x90, 0x0d, 0x05, 0x0f, + 0x4e, 0x60, 0xd5, 0xff, 0x89, 0x06, 0x33, 0x22, 0x42, 0x1e, 0x7d, 0x28, 0xa5, 0xf0, 0x4b, 0x67, + 0x58, 0x4a, 0xe9, 0xbf, 0xcf, 0xbc, 0x24, 0xc2, 0xa4, 0x12, 0x4c, 0x59, 0x4c, 0x5f, 0x14, 0x94, + 0x63, 0x03, 0x2d, 0xe1, 0x2d, 0x89, 0x8c, 0x36, 0x85, 0x9a, 0xfe, 0xab, 0x1a, 0x2c, 0x8d, 0xb4, + 0x1a, 0x63, 0x6b, 0x3d, 0x43, 0x4f, 0xf7, 0x37, 0x01, 0xd8, 0x75, 0x88, 0xd5, 0x4e, 0xbb, 0x4b, + 0xfc, 0x5d, 0x29, 0x57, 0x7e, 0x5e, 0x83, 0xc5, 0xf8, 0x4c, 0x46, 0xf4, 0x42, 0x2b, 0x11, 0x6b, + 0x72, 0x33, 0x85, 0xac, 0xb1, 0x2c, 0x3e, 0x7c, 0x31, 0x0d, 0xc1, 0x23, 0x84, 0xd1, 0xdf, 0xd0, + 0x60, 0xd1, 0x48, 0x5e, 0x87, 0x88, 0xf6, 0x81, 0x62, 0xb1, 0x7e, 0xa9, 0xbb, 0x15, 0x71, 0x67, + 0x52, 0x80, 0x00, 0x8f, 0xd0, 0x45, 0x1f, 0x83, 0x39, 0x63, 0x60, 0xaf, 0x0e, 0x2d, 0x9b, 0xea, + 0x04, 0x51, 0x0c, 0x3d, 0xe3, 0xdd, 0xd5, 0x4e, 0x5b, 0x96, 0xe3, 0x44, 0x2d, 0x79, 0xe1, 0x41, + 0x0c, 0xe5, 0x64, 0xd9, 0x0b, 0x0f, 0x62, 0x14, 0xe3, 0x0b, 0x0f, 0x62, 0xf0, 0x54, 0x2a, 0xe8, + 0x0b, 0xf0, 0x28, 0xdf, 0x42, 0x1b, 0x46, 0x60, 0x9b, 0xab, 0xc3, 0x70, 0x9b, 0xb8, 0x61, 0xa4, + 0x05, 0x73, 0xb3, 0x9a, 0x1d, 0x3b, 0xaf, 0xe5, 0x55, 0xc2, 0xf9, 0xed, 0x91, 0x07, 0xe0, 0xd9, + 0x96, 0x29, 0x3e, 0x88, 0x9f, 0x7c, 0x16, 0x0b, 0x0b, 0xb8, 0xd3, 0x6e, 0x35, 0xc5, 0xf7, 0x30, + 0x89, 0x14, 0xff, 0xc6, 0x0a, 0x09, 0xf4, 0x77, 0x35, 0x98, 0x17, 0x8c, 0x2e, 0x88, 0xce, 0x30, + 0x16, 0xf8, 0x42, 0x61, 0x86, 0x4c, 0x71, 0xfd, 0x0a, 0x56, 0xb1, 0xf3, 0xf0, 0x45, 0x19, 0x0d, + 0x93, 0x80, 0xe1, 0x64, 0x47, 0xd0, 0xd7, 0x35, 0xb8, 0x10, 0x10, 0x7f, 0xd7, 0x36, 0xc9, 0xaa, + 0x69, 0x7a, 0x43, 0x37, 0x9a, 0xe7, 0x6a, 0x09, 0xff, 0x51, 0x37, 0x03, 0x61, 0x63, 0xf9, 0xf0, + 0xa0, 0x7e, 0x21, 0x0b, 0x82, 0x33, 0x3b, 0x80, 0x7e, 0x5a, 0x83, 0x73, 0xf7, 0x8c, 0xd0, 0xdc, + 0x6e, 0x1a, 0xe6, 0x36, 0xb3, 0x31, 0x83, 0xe5, 0xd9, 0x12, 0x51, 0xb2, 0xaf, 0x27, 0x71, 0x71, + 0xff, 0x51, 0xaa, 0x10, 0xa7, 0x29, 0xa2, 0x10, 0xaa, 0x3e, 0x79, 0x6b, 0x48, 0x82, 0x30, 0x58, + 0x06, 0x46, 0xfd, 0x33, 0xe5, 0x27, 0x0d, 0x0b, 0x8c, 0x5c, 0x7d, 0x8a, 0x7e, 0x61, 0x49, 0xe9, + 0xf2, 0x2b, 0x80, 0x46, 0x67, 0x14, 0x2d, 0xc2, 0xc4, 0x0e, 0xe1, 0x57, 0x8e, 0x66, 0x31, 0xfd, + 0x17, 0x5d, 0x80, 0xa9, 0x5d, 0xc3, 0x19, 0x72, 0x4d, 0xb8, 0x8a, 0xf9, 0x8f, 0x17, 0x2a, 0x9f, + 0xd4, 0xf4, 0xef, 0x68, 0x70, 0x31, 0x93, 0x26, 0xc2, 0x70, 0xa9, 0x6f, 0xec, 0xdd, 0xf6, 0xdc, + 0xf5, 0x61, 0x68, 0x84, 0xb6, 0xdb, 0x6b, 0xbb, 0x5b, 0x8e, 0xdd, 0xdb, 0xe6, 0x1a, 0xe1, 0x14, + 0xd7, 0x29, 0xd6, 0x33, 0x6b, 0xe0, 0x9c, 0x96, 0xa8, 0x0d, 0xe7, 0xfb, 0xc6, 0xde, 0x08, 0xc2, + 0x0a, 0x43, 0xc8, 0x6e, 0xae, 0xad, 0x8f, 
0x82, 0x71, 0x56, 0x1b, 0xfd, 0xeb, 0x93, 0xf0, 0x18, + 0xed, 0x78, 0xbc, 0xef, 0xac, 0x1b, 0xae, 0xd1, 0x7b, 0x97, 0xca, 0xf7, 0x6f, 0x6b, 0xf0, 0xc8, + 0x76, 0xb6, 0x9e, 0x2b, 0xb6, 0x3e, 0x5c, 0x4c, 0xdd, 0x3f, 0x4a, 0x77, 0xe6, 0xe1, 0x1f, 0x47, + 0x56, 0xc1, 0x79, 0xbd, 0x42, 0xaf, 0xc0, 0xa2, 0xeb, 0x59, 0xa4, 0xd9, 0x6e, 0xe1, 0x75, 0x23, + 0xd8, 0xe9, 0x46, 0x5e, 0x9d, 0x29, 0xee, 0xe0, 0xbc, 0x9d, 0x82, 0xe1, 0x91, 0xda, 0x68, 0x17, + 0xd0, 0xc0, 0xb3, 0xd6, 0x76, 0x6d, 0x33, 0x8a, 0x02, 0x28, 0xee, 0x54, 0x65, 0xc1, 0xf9, 0x9d, + 0x11, 0x6c, 0x38, 0x83, 0x82, 0xfe, 0x3d, 0x0d, 0xce, 0xd1, 0x29, 0xe9, 0xf8, 0xde, 0xde, 0xfe, + 0xbb, 0x92, 0x1b, 0x9e, 0x81, 0xc9, 0xbe, 0x67, 0x45, 0x76, 0xe9, 0x45, 0xaa, 0x1c, 0xad, 0x7b, + 0x16, 0x79, 0xc0, 0x9d, 0xac, 0x7b, 0xfb, 0xf4, 0x07, 0x66, 0x55, 0xf4, 0x3f, 0xd5, 0xb8, 0xfa, + 0x12, 0xd9, 0x65, 0xef, 0x4e, 0xf6, 0xfe, 0x04, 0xcc, 0xd3, 0xb2, 0x75, 0x63, 0xaf, 0xd3, 0x7a, + 0xcd, 0x73, 0xa2, 0xb8, 0x14, 0x16, 0x2d, 0x7e, 0x53, 0x05, 0xe0, 0x64, 0x3d, 0xfd, 0x5b, 0xf3, + 0xc0, 0x2a, 0x38, 0x24, 0x7c, 0x57, 0x7e, 0xd8, 0xb3, 0x50, 0x33, 0x07, 0xc3, 0xe6, 0xb5, 0xee, + 0xab, 0x43, 0x2f, 0x34, 0x84, 0x03, 0x98, 0xa9, 0x24, 0xcd, 0xce, 0x46, 0x54, 0x8c, 0xd5, 0x3a, + 0x74, 0xe1, 0x98, 0x83, 0xa1, 0x10, 0x46, 0x1d, 0x35, 0x7c, 0x81, 0x2d, 0x9c, 0x66, 0x67, 0x23, + 0x01, 0xc3, 0x23, 0xb5, 0xd1, 0x97, 0x35, 0x98, 0x23, 0x82, 0xa9, 0x6f, 0x18, 0xbe, 0x25, 0xd6, + 0x4c, 0xf1, 0x0d, 0x45, 0x8e, 0x6e, 0xb4, 0x54, 0xb8, 0x32, 0xb7, 0xa6, 0xd0, 0xc0, 0x09, 0x8a, + 0x4c, 0xaf, 0x12, 0xbf, 0xe9, 0x64, 0x79, 0xd6, 0x75, 0xdf, 0x30, 0x89, 0x62, 0x8a, 0x4d, 0x09, + 0xbd, 0x2a, 0xaf, 0x12, 0xce, 0x6f, 0x8f, 0xfe, 0xb1, 0x06, 0x97, 0x24, 0xd4, 0x76, 0xed, 0xfe, + 0xb0, 0x8f, 0x89, 0xe9, 0x18, 0x76, 0x5f, 0x28, 0x59, 0x9f, 0x3d, 0xbd, 0x2f, 0x4d, 0xe2, 0xe7, + 0x9b, 0x56, 0x36, 0x0c, 0xe7, 0xf4, 0x09, 0xfd, 0xaa, 0x06, 0x4f, 0x46, 0xa0, 0x0e, 0x35, 0x75, + 0x86, 0x3e, 0x89, 0xa3, 0x9b, 0xc4, 0x98, 0x14, 0xb3, 0x07, 0x3f, 0x78, 0x78, 0x50, 0x7f, 0x72, + 0xed, 0x18, 0xdc, 0xf8, 0x58, 0xea, 0x09, 0x8e, 0xe9, 0x7a, 0x5b, 0xa1, 0xd0, 0xca, 0x1e, 0x1a, + 0xc7, 0x50, 0x1a, 0x38, 0x41, 0x11, 0xfd, 0x86, 0x06, 0x8f, 0xa8, 0x05, 0x2a, 0xc3, 0x70, 0x75, + 0xec, 0x73, 0xa7, 0xd7, 0x9b, 0x14, 0x01, 0xee, 0xd2, 0xc9, 0x01, 0xe2, 0xbc, 0x6e, 0xa1, 0xa7, + 0x60, 0xa6, 0xcf, 0x98, 0x93, 0xab, 0x6c, 0x53, 0xdc, 0x37, 0xc7, 0xf9, 0x35, 0xc0, 0x11, 0x8c, + 0x9a, 0x43, 0x03, 0xcf, 0xea, 0xd8, 0x56, 0x70, 0xcb, 0xee, 0xdb, 0xe1, 0x72, 0x8d, 0x5d, 0x2f, + 0x61, 0xe3, 0xd1, 0xf1, 0xac, 0x4e, 0xbb, 0xc5, 0xcb, 0x71, 0xa2, 0x16, 0x8b, 0xa0, 0xb5, 0xfb, + 0x46, 0x8f, 0x74, 0x86, 0x8e, 0xd3, 0xf1, 0x3d, 0x66, 0x24, 0xb7, 0x88, 0x61, 0x39, 0xb6, 0x4b, + 0x96, 0xe7, 0x8a, 0x47, 0xd0, 0xb6, 0xf3, 0x90, 0xe2, 0x7c, 0x7a, 0x68, 0x05, 0x60, 0xcb, 0xb0, + 0x9d, 0xee, 0x3d, 0x63, 0x70, 0xc7, 0x5d, 0x9e, 0x67, 0x62, 0x8c, 0x59, 0x22, 0xd7, 0x64, 0x29, + 0x56, 0x6a, 0x30, 0x86, 0xa2, 0xc2, 0x10, 0x13, 0x7e, 0x4f, 0x66, 0x79, 0xe1, 0xb4, 0x18, 0x2a, + 0xc2, 0xc8, 0x07, 0xf0, 0xa6, 0x42, 0x03, 0x27, 0x28, 0xa2, 0xaf, 0x6a, 0xb0, 0x10, 0xec, 0x07, + 0x21, 0xe9, 0xcb, 0x4e, 0x9c, 0x3b, 0xf5, 0x4e, 0x30, 0x0f, 0x42, 0x37, 0x41, 0x05, 0xa7, 0xa8, + 0xea, 0x07, 0x15, 0xae, 0x22, 0x8f, 0x70, 0x21, 0x7a, 0x09, 0xce, 0xf5, 0x49, 0xdf, 0xf3, 0xf7, + 0x57, 0xa3, 0x0b, 0xe4, 0xc2, 0xe7, 0xc1, 0x6c, 0x86, 0xf5, 0x24, 0x08, 0xa7, 0xeb, 0xd2, 0x9d, + 0x82, 0xcd, 0xd8, 0xb5, 0x6e, 0xdc, 0xbe, 0x12, 0xef, 0x14, 0xed, 0x14, 0x0c, 0x8f, 0xd4, 0x46, + 0x4d, 0x58, 0x12, 0x65, 0x6d, 0xaa, 0x7e, 0x05, 0xd7, 0x7c, 0x12, 
0x1d, 0x47, 0x50, 0xad, 0x62, + 0xa9, 0x9d, 0x06, 0xe2, 0xd1, 0xfa, 0xf4, 0x2b, 0xe8, 0x0f, 0xb5, 0x17, 0x93, 0xf1, 0x57, 0xdc, + 0x4e, 0x82, 0x70, 0xba, 0x6e, 0xa4, 0x28, 0x26, 0xba, 0x30, 0x15, 0x7f, 0xc5, 0xed, 0x14, 0x0c, + 0x8f, 0xd4, 0xd6, 0xff, 0x74, 0x12, 0x3e, 0x30, 0x86, 0xf0, 0x46, 0xfd, 0xec, 0xe1, 0x3e, 0x66, + 0x1d, 0xad, 0x44, 0xe7, 0x17, 0x2b, 0xaf, 0x0e, 0x0d, 0x37, 0xb4, 0xc3, 0xfd, 0x31, 0xa7, 0x67, + 0x90, 0x33, 0x3d, 0x27, 0xa7, 0x37, 0xee, 0x74, 0x06, 0x79, 0xd3, 0x79, 0x72, 0x92, 0xe3, 0x4f, + 0x7f, 0x3f, 0x7b, 0xfa, 0x0b, 0x8e, 0xea, 0xb1, 0xec, 0x32, 0xc8, 0x61, 0x97, 0x82, 0xa3, 0x3a, + 0x06, 0x7b, 0xfd, 0x87, 0x49, 0xf8, 0xe0, 0x38, 0xbb, 0x48, 0x41, 0xfe, 0xca, 0x90, 0xd3, 0x0f, + 0x95, 0xbf, 0xf2, 0x42, 0x4e, 0x1e, 0x22, 0x7f, 0x65, 0x90, 0x7c, 0xd8, 0xfc, 0x95, 0x37, 0xaa, + 0x0f, 0x8b, 0xbf, 0xf2, 0x46, 0x75, 0x0c, 0xfe, 0xfa, 0x3f, 0xe9, 0xfd, 0x41, 0x6e, 0x61, 0x6d, + 0x98, 0x30, 0x07, 0xc3, 0x82, 0x42, 0x8a, 0x1d, 0xa9, 0x35, 0x3b, 0x1b, 0x98, 0xe2, 0x40, 0x18, + 0xa6, 0x39, 0xff, 0x14, 0x14, 0x41, 0x2c, 0x5c, 0x88, 0xb3, 0x24, 0x16, 0x98, 0xe8, 0x50, 0x91, + 0xc1, 0x36, 0xe9, 0x13, 0xdf, 0x70, 0xba, 0xa1, 0xe7, 0x1b, 0xbd, 0xa2, 0xd2, 0x86, 0x0d, 0xd5, + 0x5a, 0x0a, 0x17, 0x1e, 0xc1, 0x4e, 0x07, 0x64, 0x60, 0x5b, 0x05, 0xe5, 0x0b, 0x1b, 0x90, 0x4e, + 0xbb, 0x85, 0x29, 0x0e, 0xfd, 0x7b, 0x55, 0x50, 0xee, 0x57, 0x50, 0x83, 0xc5, 0x70, 0x1c, 0xef, + 0x5e, 0xc7, 0xb7, 0x77, 0x6d, 0x87, 0xf4, 0x88, 0x25, 0x03, 0xf0, 0x03, 0x71, 0xf2, 0xca, 0xb4, + 0xa7, 0xd5, 0xbc, 0x4a, 0x38, 0xbf, 0x3d, 0xd5, 0xe5, 0x96, 0xcc, 0xf4, 0x45, 0xad, 0x52, 0x47, + 0x2d, 0x23, 0xd7, 0xbe, 0xf8, 0x82, 0x1a, 0x29, 0xc6, 0xa3, 0x74, 0xd1, 0x5f, 0xd5, 0xb8, 0xb5, + 0x2d, 0x5d, 0x76, 0x62, 0xd2, 0x6e, 0x9c, 0x96, 0x97, 0x38, 0xb6, 0xdb, 0x63, 0xaf, 0x60, 0x92, + 0x22, 0xfa, 0x96, 0x06, 0x17, 0x77, 0xb2, 0xbc, 0x6f, 0x62, 0x6e, 0x3b, 0x85, 0xfb, 0x92, 0xe3, + 0xcf, 0xe3, 0xe7, 0x87, 0x99, 0x15, 0x70, 0x76, 0x4f, 0xe4, 0x38, 0x49, 0xd7, 0x89, 0x90, 0x03, + 0xc5, 0xc7, 0x29, 0xe5, 0x84, 0x89, 0xc7, 0x49, 0x02, 0x70, 0x92, 0x22, 0x7a, 0x0b, 0x66, 0x77, + 0x22, 0x57, 0x94, 0x30, 0x6e, 0x5b, 0x85, 0xc9, 0x2b, 0x0e, 0x2d, 0x7e, 0x52, 0x29, 0x0b, 0x71, + 0x4c, 0x05, 0xd9, 0x30, 0xb3, 0xc3, 0xa5, 0x91, 0x30, 0x4a, 0x1b, 0xe5, 0xf5, 0x65, 0x6e, 0x19, + 0x89, 0x22, 0x1c, 0xe1, 0x57, 0xa3, 0x38, 0xaa, 0xc7, 0xc4, 0x42, 0x7d, 0x43, 0x83, 0x8b, 0xbb, + 0xc4, 0x0f, 0x6d, 0x33, 0xed, 0xfe, 0x9c, 0x2d, 0xa1, 0xd4, 0xbf, 0x96, 0x85, 0x91, 0xb3, 0x4a, + 0x26, 0x08, 0x67, 0xf7, 0x41, 0xff, 0x2f, 0x1a, 0x8c, 0x78, 0x83, 0xd0, 0x2f, 0x6a, 0x30, 0xb7, + 0x45, 0x8c, 0x70, 0xe8, 0x93, 0xeb, 0x46, 0x28, 0xa3, 0x74, 0x5f, 0x3f, 0x15, 0x2f, 0xd4, 0xca, + 0x35, 0x05, 0x33, 0x3f, 0x88, 0x91, 0x17, 0x83, 0x55, 0x10, 0x4e, 0x74, 0xe1, 0xf2, 0xa7, 0x61, + 0x69, 0xa4, 0xe1, 0x89, 0xfc, 0xfd, 0xbf, 0x23, 0xfc, 0x89, 0xe9, 0x44, 0x70, 0x6f, 0xc0, 0x94, + 0x61, 0x59, 0x32, 0xd9, 0xcc, 0x0b, 0x05, 0x0f, 0x1d, 0x2d, 0x35, 0x1c, 0x9a, 0xfd, 0xc4, 0x1c, + 0x2f, 0xba, 0x06, 0xc8, 0x48, 0x1c, 0xaf, 0xad, 0x7b, 0x56, 0x64, 0x2e, 0x31, 0xef, 0xee, 0xea, + 0x08, 0x14, 0x67, 0xb4, 0xd0, 0x3f, 0x05, 0x0b, 0xc9, 0xeb, 0x76, 0x27, 0x88, 0xc8, 0xd3, 0xff, + 0xba, 0x06, 0x68, 0xf4, 0x2a, 0x3a, 0x0a, 0xa0, 0x2a, 0x6a, 0x44, 0x93, 0x5c, 0xcc, 0xd5, 0x98, + 0x0e, 0x16, 0x8c, 0x23, 0xd2, 0x45, 0x41, 0x80, 0x25, 0x21, 0xfd, 0xcf, 0x34, 0x88, 0x93, 0x5a, + 0xa0, 0x8f, 0x43, 0xcd, 0x22, 0x81, 0xe9, 0xdb, 0x83, 0x30, 0xfe, 0x10, 0x79, 0x85, 0xb8, 0x15, + 0x83, 0xb0, 0x5a, 0x0f, 0xe9, 0x30, 0x1d, 0x1a, 0xc1, 0x4e, 0xbb, 0x25, 0x0c, 0x47, 0xb6, 
0xcd, + 0xdf, 0x65, 0x25, 0x58, 0x40, 0xe2, 0xbb, 0x6b, 0x13, 0x63, 0xdc, 0x5d, 0x43, 0x5b, 0xa7, 0x70, + 0x51, 0x0f, 0x1d, 0x7f, 0x49, 0x4f, 0xff, 0xb7, 0x15, 0x48, 0x26, 0x12, 0x29, 0x3a, 0x04, 0xa3, + 0x37, 0x0b, 0x2b, 0x0f, 0xed, 0x66, 0xe1, 0x47, 0x58, 0xd2, 0x2d, 0x9e, 0x2a, 0x91, 0x1f, 0x86, + 0xa8, 0x99, 0xb2, 0x78, 0x9e, 0x43, 0x59, 0x03, 0x3d, 0xaf, 0xc6, 0xfe, 0xcd, 0x36, 0x3e, 0x10, + 0xad, 0x0b, 0x16, 0xd0, 0xf7, 0x40, 0xdc, 0x9d, 0x94, 0xdf, 0x9f, 0x08, 0xf3, 0xfb, 0xb8, 0x88, + 0x5c, 0x9b, 0x4a, 0xdc, 0xef, 0x8c, 0x2e, 0x85, 0x2e, 0x25, 0x1a, 0xc6, 0xe1, 0x6c, 0xfa, 0xd7, + 0x34, 0x98, 0x11, 0x69, 0x05, 0xc6, 0x08, 0xd9, 0xec, 0xc1, 0x14, 0x53, 0xd7, 0x4b, 0x69, 0x32, + 0xdd, 0x6d, 0xcf, 0x0b, 0x13, 0xe9, 0x15, 0x58, 0xa8, 0x18, 0xfb, 0x17, 0x73, 0xfc, 0xfa, 0x37, + 0x26, 0xe1, 0x49, 0x51, 0x65, 0x64, 0x9b, 0x96, 0x8b, 0x70, 0x1f, 0xce, 0x8b, 0x69, 0x6a, 0xf9, + 0x86, 0x2d, 0xcf, 0x8b, 0x8a, 0x59, 0x60, 0xe2, 0x2c, 0x71, 0x04, 0x1d, 0xce, 0xa2, 0x81, 0x7e, + 0x0a, 0x2e, 0x88, 0xe2, 0x1b, 0xc4, 0x70, 0xc2, 0xed, 0x88, 0x76, 0x31, 0x6b, 0x8c, 0x1d, 0x60, + 0xaf, 0x67, 0xe0, 0xc3, 0x99, 0x54, 0x58, 0xb4, 0x97, 0x00, 0x34, 0x7d, 0x62, 0xa8, 0x87, 0x65, + 0x25, 0xa2, 0xbd, 0xd6, 0x33, 0x31, 0xe2, 0x1c, 0x4a, 0xcc, 0x95, 0x65, 0xec, 0x31, 0xcb, 0x18, + 0x93, 0xd0, 0xb7, 0x59, 0xc2, 0x0b, 0xca, 0xe0, 0xdc, 0x96, 0x4d, 0x82, 0x70, 0xba, 0x2e, 0x7a, + 0x01, 0x16, 0xd8, 0xf9, 0x5f, 0x7c, 0x5d, 0x6b, 0x2a, 0xce, 0x9c, 0x79, 0x3b, 0x01, 0xc1, 0xa9, + 0x9a, 0xfa, 0xaf, 0x6b, 0x30, 0xa7, 0x32, 0xd0, 0x18, 0x81, 0x9d, 0xbb, 0x8a, 0xc0, 0x2e, 0x13, + 0x2d, 0xa9, 0x92, 0x1d, 0x53, 0x66, 0x9f, 0xcf, 0x68, 0xc3, 0x0e, 0xad, 0x48, 0x4a, 0xf8, 0x97, + 0x3a, 0xb4, 0x1a, 0xd9, 0x49, 0xe4, 0xa1, 0x55, 0x1a, 0x82, 0x47, 0x08, 0xa3, 0xd7, 0x61, 0xc2, + 0xf4, 0x6d, 0x31, 0x30, 0x9f, 0x2c, 0x66, 0x9f, 0xe0, 0x76, 0x1c, 0x3b, 0xdf, 0xc4, 0x6d, 0x4c, + 0x31, 0xea, 0xbf, 0x3d, 0x01, 0x35, 0x25, 0x6b, 0x09, 0x5a, 0x2f, 0x63, 0xdf, 0xc6, 0xe8, 0x23, + 0x1b, 0x77, 0x1d, 0x26, 0x7a, 0x83, 0x61, 0x41, 0x03, 0x57, 0xa2, 0xbb, 0x4e, 0xd1, 0xf5, 0x06, + 0x43, 0xf4, 0x9a, 0x34, 0x99, 0x8b, 0x19, 0xb5, 0x32, 0xe0, 0x2e, 0x65, 0x36, 0x47, 0xec, 0x39, + 0x99, 0xcb, 0x9e, 0x2e, 0xcc, 0x04, 0xc2, 0x9e, 0x9e, 0x2a, 0x91, 0x34, 0x47, 0x19, 0x6a, 0x61, + 0x40, 0x73, 0x3d, 0x3c, 0xb2, 0xaf, 0x23, 0x22, 0x54, 0x0b, 0x18, 0xb2, 0x88, 0x77, 0x66, 0x62, + 0x54, 0xb9, 0x16, 0xb0, 0xc1, 0x4a, 0xb0, 0x80, 0xe8, 0xff, 0x4c, 0x03, 0x34, 0x8a, 0x10, 0x7d, + 0x00, 0xa6, 0x58, 0xe0, 0xbf, 0x58, 0x6c, 0x4a, 0xa6, 0x04, 0x23, 0x08, 0x30, 0x87, 0xa1, 0xd7, + 0xc5, 0xad, 0x8e, 0x62, 0x33, 0x23, 0xb7, 0x70, 0x41, 0x53, 0xb9, 0x06, 0x12, 0xed, 0x51, 0x13, + 0x79, 0x7b, 0x94, 0xfe, 0xc3, 0x0a, 0x65, 0x39, 0xdb, 0x0d, 0x89, 0xcb, 0xe2, 0x30, 0xef, 0x03, + 0x18, 0xc3, 0xd0, 0xe3, 0x7b, 0xb2, 0xe0, 0xbc, 0xcf, 0x14, 0x1c, 0x5d, 0x89, 0x75, 0x55, 0x62, + 0xe4, 0x87, 0x22, 0xf1, 0x6f, 0xac, 0x50, 0xa3, 0xb4, 0x43, 0xbb, 0x4f, 0x5e, 0xb7, 0x5d, 0xcb, + 0xbb, 0x27, 0x06, 0xa3, 0x34, 0xed, 0xbb, 0x12, 0x23, 0xa7, 0x1d, 0xff, 0xc6, 0x0a, 0x35, 0xf4, + 0x59, 0x58, 0x66, 0xf9, 0x77, 0x5d, 0x96, 0xc3, 0x49, 0x74, 0xce, 0x73, 0x9c, 0x68, 0x97, 0xa8, + 0x36, 0x1e, 0x3f, 0x3c, 0xa8, 0x2f, 0x37, 0x73, 0xea, 0xe0, 0xdc, 0xd6, 0xfa, 0xb7, 0x35, 0xb8, + 0x98, 0x39, 0x16, 0xe8, 0x3a, 0x2c, 0xc5, 0x07, 0xe2, 0xaa, 0x54, 0xab, 0xc6, 0x49, 0xc9, 0x6e, + 0xa6, 0x2b, 0xe0, 0xd1, 0x36, 0x68, 0x5d, 0x6e, 0xed, 0xaa, 0xd4, 0x14, 0xa7, 0xe9, 0x8f, 0x09, + 0x54, 0x59, 0x82, 0x15, 0x67, 0xb5, 0xd3, 0xbf, 0x90, 0xe8, 0x70, 0x3c, 0x60, 0x94, 0x99, 0x37, + 0x49, 0x4f, 0x46, 
0xcc, 0x4a, 0x66, 0x6e, 0xd0, 0x42, 0xcc, 0x61, 0xe8, 0x09, 0x35, 0x6e, 0x5d, + 0x4a, 0x8d, 0x28, 0x76, 0x5d, 0x1f, 0x02, 0xac, 0x7b, 0xae, 0x1d, 0x7a, 0xbe, 0xed, 0xf6, 0x50, + 0x0f, 0xaa, 0x86, 0xc8, 0x0e, 0x2d, 0x98, 0xed, 0xa5, 0x62, 0x96, 0x91, 0x40, 0xc2, 0x23, 0xb9, + 0xa2, 0x5f, 0x58, 0x22, 0xd7, 0xff, 0xa1, 0x06, 0x97, 0xb2, 0xef, 0x64, 0x8c, 0xb1, 0x1d, 0xf6, + 0xa1, 0xe6, 0xc7, 0xcd, 0x04, 0x67, 0xfe, 0x84, 0x7a, 0x3b, 0x5b, 0xc9, 0x16, 0x4f, 0x55, 0x85, + 0xa6, 0xef, 0x05, 0xd1, 0xec, 0xa4, 0x2f, 0x6c, 0xcb, 0xe5, 0xaa, 0xf4, 0x04, 0xab, 0xf8, 0xf5, + 0xaf, 0x54, 0x00, 0x6e, 0x93, 0xf0, 0x9e, 0xe7, 0xef, 0xd0, 0x31, 0x7a, 0x57, 0xdd, 0x0d, 0x7a, + 0x1c, 0x26, 0x07, 0x9e, 0x15, 0x08, 0x89, 0xc2, 0x2e, 0xed, 0xb0, 0xd3, 0x5c, 0x56, 0x8a, 0xea, + 0x30, 0xc5, 0xfc, 0xb6, 0x42, 0x76, 0x33, 0x4d, 0x95, 0x6a, 0x27, 0x01, 0xe6, 0xe5, 0xe8, 0x69, + 0xa8, 0x8a, 0x20, 0xc3, 0x40, 0xe8, 0xde, 0x6c, 0xc2, 0x44, 0x38, 0x62, 0x80, 0x25, 0x54, 0xff, + 0x99, 0x49, 0x48, 0xe4, 0x37, 0x8f, 0x2d, 0xe8, 0xc9, 0x87, 0x64, 0x41, 0x7f, 0x16, 0x96, 0x1d, + 0xcf, 0xb0, 0x1a, 0x86, 0x43, 0xd9, 0xde, 0xef, 0xf2, 0xf9, 0x30, 0xdc, 0x1e, 0x89, 0xf2, 0x94, + 0x33, 0x11, 0x70, 0x2b, 0xa7, 0x0e, 0xce, 0x6d, 0x8d, 0x86, 0x4a, 0x5a, 0x75, 0xaa, 0x33, 0xac, + 0x97, 0xce, 0xff, 0xbe, 0xa2, 0x46, 0x98, 0xca, 0x8d, 0x34, 0x99, 0x7a, 0x1d, 0x7d, 0x55, 0x83, + 0x8b, 0x64, 0x2f, 0x24, 0xbe, 0x6b, 0x38, 0x77, 0x7d, 0x63, 0x6b, 0xcb, 0x36, 0x13, 0xf1, 0x32, + 0x9d, 0xc3, 0x83, 0xfa, 0xc5, 0xb5, 0xac, 0x0a, 0x0f, 0x0e, 0xea, 0xcf, 0x8d, 0x3e, 0x70, 0x10, + 0xc5, 0x93, 0x66, 0x36, 0x61, 0xec, 0x98, 0x4d, 0xee, 0xf2, 0xf3, 0x50, 0x3b, 0x41, 0xfc, 0xe4, + 0xac, 0xea, 0x4f, 0xf9, 0xe5, 0x69, 0x50, 0xa2, 0x79, 0x4f, 0x90, 0x5b, 0xef, 0xef, 0x6b, 0x70, + 0xc1, 0x74, 0x6c, 0xe2, 0x86, 0xa9, 0xa8, 0x65, 0xbe, 0x34, 0x5e, 0x2b, 0x16, 0x67, 0x3c, 0x20, + 0x6e, 0xbb, 0xd5, 0xf4, 0x5c, 0x97, 0x98, 0x61, 0x33, 0x03, 0x3b, 0x37, 0x4e, 0xb2, 0x20, 0x38, + 0xb3, 0x37, 0xec, 0x83, 0x58, 0x79, 0xbb, 0xa5, 0x5e, 0xe5, 0x69, 0x8a, 0x32, 0x2c, 0xa1, 0xe8, + 0x59, 0xa8, 0xf5, 0x7c, 0x6f, 0x38, 0x08, 0x9a, 0x2c, 0x92, 0x87, 0xaf, 0x30, 0x16, 0x2a, 0x75, + 0x3d, 0x2e, 0xc6, 0x6a, 0x1d, 0xf4, 0x31, 0x98, 0xe3, 0x3f, 0x3b, 0x3e, 0xd9, 0xb2, 0xf7, 0xc4, + 0x8a, 0x63, 0x81, 0x01, 0xd7, 0x95, 0x72, 0x9c, 0xa8, 0x85, 0x3e, 0x0c, 0xb3, 0x76, 0x10, 0x0c, + 0x89, 0xbf, 0x81, 0x6f, 0x89, 0xc4, 0x3d, 0xcc, 0x1b, 0xda, 0x8e, 0x0a, 0x71, 0x0c, 0x47, 0xbf, + 0xa4, 0xc1, 0x82, 0x4f, 0xde, 0x1a, 0xda, 0x3e, 0xb1, 0x18, 0xd1, 0x40, 0xc4, 0x54, 0x77, 0x4b, + 0x06, 0x72, 0xaf, 0xe0, 0x04, 0x56, 0xce, 0xe9, 0xd2, 0x8f, 0x90, 0x04, 0xe2, 0x54, 0x17, 0xe8, + 0x58, 0x05, 0x76, 0xcf, 0xb5, 0xdd, 0xde, 0xaa, 0xd3, 0x0b, 0x96, 0xab, 0x6c, 0xf5, 0xb2, 0xb1, + 0xea, 0xc6, 0xc5, 0x58, 0xad, 0x83, 0x3e, 0x01, 0xf3, 0xc3, 0x80, 0xf2, 0x6e, 0x9f, 0xf0, 0x01, + 0x9e, 0x8d, 0x43, 0xec, 0x36, 0x54, 0x00, 0x4e, 0xd6, 0xa3, 0xa6, 0x59, 0x54, 0x20, 0x86, 0x19, + 0xf8, 0xe5, 0x5c, 0xda, 0xcf, 0x8d, 0x04, 0x04, 0xa7, 0x6a, 0x5e, 0x5e, 0x85, 0xf3, 0x19, 0x9f, + 0x79, 0xa2, 0x05, 0xf2, 0xeb, 0x15, 0x78, 0xff, 0xb1, 0x6c, 0x89, 0x7e, 0x59, 0x83, 0x1a, 0xd9, + 0x0b, 0x7d, 0x43, 0x06, 0xfc, 0xd1, 0x39, 0xea, 0x3d, 0x9c, 0x45, 0xb0, 0xb2, 0x16, 0x53, 0xe2, + 0xf3, 0x26, 0xf7, 0x3c, 0x05, 0x82, 0xd5, 0x0e, 0x51, 0x15, 0x9b, 0x5f, 0xde, 0x56, 0x1d, 0x6d, + 0x22, 0x21, 0xb4, 0x80, 0x5c, 0x7e, 0x19, 0x16, 0xd3, 0x98, 0x4f, 0x34, 0x54, 0xbf, 0x55, 0x81, + 0xa9, 0x8e, 0x63, 0xb8, 0x67, 0xf1, 0xd4, 0xc0, 0x17, 0x13, 0xf9, 0x3f, 0x8a, 0x65, 0x55, 0x61, + 0x7d, 0xcd, 0xcd, 0x0f, 0xb4, 0x9d, 0xca, 
0x0f, 0xf4, 0x4a, 0x09, 0x1a, 0x47, 0xa7, 0x03, 0xfa, + 0x9e, 0x06, 0xb3, 0xac, 0xde, 0x19, 0xe4, 0x11, 0x79, 0x23, 0x99, 0x47, 0xe4, 0x85, 0xe2, 0x1f, + 0x95, 0x93, 0x36, 0xe4, 0x8f, 0xa3, 0x8f, 0x61, 0x59, 0x42, 0x3e, 0xa7, 0x66, 0xc0, 0xe7, 0x5f, + 0xf3, 0x74, 0x56, 0xd6, 0x9d, 0x5b, 0x9e, 0x69, 0x38, 0x69, 0x4d, 0xee, 0xe8, 0x34, 0xf8, 0x2e, + 0xcc, 0x12, 0x71, 0xab, 0x3e, 0xfa, 0x9a, 0x62, 0xba, 0x6d, 0x74, 0x37, 0x3f, 0xa6, 0x17, 0x95, + 0x04, 0x38, 0x26, 0xa1, 0xff, 0x4e, 0x05, 0x6a, 0xca, 0x6c, 0xbe, 0x23, 0xd9, 0x7d, 0xae, 0x65, + 0xe6, 0x8b, 0xae, 0xb0, 0x80, 0xbe, 0x4b, 0x27, 0x48, 0x36, 0x1f, 0x40, 0xcd, 0x8c, 0xd3, 0x15, + 0x96, 0x62, 0x70, 0x25, 0xed, 0xa1, 0x08, 0x2c, 0x8e, 0x0b, 0xb0, 0x4a, 0x45, 0xff, 0x17, 0x15, + 0x98, 0xe9, 0xf8, 0x1e, 0x9d, 0xe3, 0x33, 0x10, 0x10, 0x9b, 0x09, 0x01, 0x51, 0x70, 0xf1, 0xf2, + 0xde, 0xe6, 0x8a, 0x88, 0x37, 0x53, 0x22, 0xa2, 0x51, 0x8a, 0xca, 0xd1, 0x42, 0xe2, 0x0f, 0x35, + 0xa8, 0x89, 0x9a, 0x67, 0x20, 0x26, 0x8c, 0xa4, 0x98, 0x78, 0xb1, 0xcc, 0x87, 0xe5, 0x08, 0x8a, + 0xbf, 0xa7, 0xc1, 0xbc, 0xa8, 0xb1, 0x4e, 0xfa, 0x9b, 0xc4, 0x47, 0xd7, 0x60, 0x26, 0x18, 0xb2, + 0xb9, 0x14, 0x5f, 0xf4, 0x98, 0x2a, 0x2a, 0xfc, 0x4d, 0xc3, 0x64, 0xef, 0x1e, 0xf0, 0x2a, 0x4a, + 0x62, 0x2e, 0x5e, 0x80, 0xa3, 0xc6, 0xd4, 0xa0, 0xf3, 0x3d, 0x67, 0xe4, 0xda, 0x3c, 0xf6, 0x1c, + 0x82, 0x19, 0x84, 0xda, 0x51, 0xf4, 0x6f, 0x74, 0xa4, 0xc4, 0xec, 0x28, 0x0a, 0x0e, 0x30, 0x2f, + 0xd7, 0x7f, 0x6e, 0x52, 0x8e, 0x36, 0x93, 0x63, 0x37, 0x60, 0xd6, 0xf4, 0x89, 0x11, 0x12, 0xab, + 0xb1, 0x3f, 0x4e, 0xe7, 0x98, 0x42, 0xd7, 0x8c, 0x5a, 0xe0, 0xb8, 0x31, 0x55, 0x9d, 0xd4, 0x53, + 0xa2, 0x4a, 0xac, 0x66, 0xe6, 0x9e, 0x10, 0xbd, 0x08, 0x53, 0xde, 0x3d, 0x57, 0xc6, 0x49, 0x1c, + 0x49, 0x98, 0x7d, 0xca, 0x1d, 0x5a, 0x1b, 0xf3, 0x46, 0x6a, 0xa2, 0x87, 0xc9, 0x23, 0x12, 0x3d, + 0xf4, 0x61, 0xa6, 0xcf, 0xa6, 0x21, 0x4a, 0xcb, 0x50, 0x8a, 0x99, 0xf9, 0x8c, 0xaa, 0x09, 0x29, + 0x19, 0x6a, 0x1c, 0xd1, 0xa0, 0x4a, 0x30, 0xd5, 0xd3, 0x82, 0x81, 0x61, 0x12, 0x55, 0x09, 0xbe, + 0x1d, 0x15, 0xe2, 0x18, 0x8e, 0xee, 0x43, 0x8d, 0xdf, 0x64, 0xe6, 0xb2, 0x76, 0xa6, 0x84, 0x4f, + 0x52, 0xf4, 0xef, 0x6e, 0x8c, 0x8e, 0x0f, 0xbe, 0x52, 0x80, 0x55, 0x62, 0xfa, 0x2f, 0x4c, 0x48, + 0x36, 0x15, 0x82, 0x3f, 0x3b, 0x69, 0xbf, 0x56, 0xe8, 0xd5, 0x8f, 0xe7, 0x60, 0x6a, 0xb0, 0x6d, + 0x04, 0x11, 0xaf, 0x46, 0xd9, 0x53, 0xa7, 0x3a, 0xb4, 0xf0, 0xc1, 0x41, 0x7d, 0x4e, 0x90, 0x66, + 0xbf, 0x31, 0xaf, 0x8b, 0x86, 0x70, 0x3e, 0x08, 0x0d, 0x87, 0x74, 0x6d, 0xe1, 0x3e, 0x0a, 0x42, + 0xa3, 0x3f, 0x28, 0x90, 0x04, 0x95, 0x9d, 0x32, 0x75, 0x47, 0x51, 0xe1, 0x2c, 0xfc, 0xe8, 0x67, + 0x34, 0x58, 0x66, 0xe5, 0xab, 0xc3, 0xd0, 0xe3, 0x99, 0x8d, 0x63, 0xe2, 0x27, 0x3f, 0x64, 0x65, + 0xc6, 0x7e, 0x37, 0x07, 0x1f, 0xce, 0xa5, 0xa4, 0xff, 0x5f, 0x0d, 0xd0, 0xe8, 0x2c, 0xa2, 0x3e, + 0x54, 0x2d, 0xb2, 0x65, 0x0c, 0x9d, 0x30, 0xda, 0x8c, 0x8b, 0x5d, 0x75, 0x8d, 0x71, 0xc6, 0x02, + 0xb2, 0x25, 0x10, 0x63, 0x49, 0x02, 0x0d, 0x60, 0xf6, 0xde, 0xb6, 0x1d, 0x12, 0xc7, 0x0e, 0x42, + 0x21, 0x24, 0x4b, 0xd3, 0x93, 0xfa, 0xc7, 0xeb, 0x11, 0x66, 0x1c, 0x13, 0xd1, 0xff, 0xda, 0x04, + 0x54, 0x4f, 0xf0, 0x38, 0xd3, 0x10, 0x90, 0xb8, 0xe8, 0x4e, 0x95, 0x16, 0x52, 0xc6, 0x6f, 0xc5, + 0x34, 0x8b, 0xe6, 0x08, 0x32, 0x9c, 0x41, 0x00, 0xbd, 0x0d, 0x17, 0x6c, 0x77, 0xcb, 0x37, 0x82, + 0xd0, 0x1f, 0x9a, 0xe1, 0xd0, 0x8f, 0x08, 0x17, 0x4a, 0x50, 0xc2, 0x8c, 0xfd, 0x76, 0x06, 0x3a, + 0x9c, 0x49, 0x04, 0x6d, 0xc1, 0xcc, 0x3d, 0xcf, 0xdf, 0xa1, 0x32, 0x6c, 0xb2, 0x44, 0x2e, 0xf8, + 0xd7, 0x19, 0x8e, 0x58, 0x78, 0xf1, 0xdf, 0x01, 0x8e, 0x90, 0xeb, 
0x7f, 0xa0, 0xc1, 0x14, 0xbf, + 0x2c, 0xf5, 0xde, 0x30, 0x74, 0x58, 0x5f, 0x73, 0xf3, 0x1a, 0x52, 0xf3, 0x83, 0xd5, 0x78, 0xaf, + 0x98, 0x1f, 0xac, 0xb3, 0x39, 0x5a, 0xc5, 0x1f, 0x4c, 0x88, 0x8f, 0x61, 0xdb, 0x76, 0x1b, 0xce, + 0x0b, 0x0d, 0xf4, 0x96, 0xbd, 0x45, 0x28, 0x83, 0xb5, 0x8c, 0xfd, 0x40, 0x5c, 0x00, 0x66, 0xd2, + 0xaf, 0x39, 0x0a, 0xc6, 0x59, 0x6d, 0xd0, 0xbf, 0xd4, 0xe8, 0x06, 0x19, 0xfa, 0xb6, 0x59, 0x2e, + 0x07, 0xa3, 0xec, 0xdc, 0xca, 0x3a, 0xc7, 0xc6, 0x2d, 0xf8, 0x8d, 0x78, 0xa7, 0x64, 0xa5, 0x0f, + 0x0e, 0xea, 0xf5, 0x0c, 0x0f, 0x61, 0xe4, 0xd3, 0xa6, 0x43, 0xfb, 0x95, 0x1f, 0x1e, 0x59, 0x85, + 0xf9, 0xd7, 0xa3, 0x2e, 0xa3, 0x1b, 0x30, 0x15, 0x98, 0xde, 0x80, 0x1c, 0xf5, 0x0c, 0x59, 0xda, + 0xfe, 0x92, 0x23, 0xdc, 0xa5, 0x2d, 0x31, 0x47, 0x70, 0xf9, 0x4d, 0x98, 0x53, 0x7b, 0x9e, 0xe1, + 0x21, 0x68, 0xa9, 0x1e, 0x82, 0x13, 0x9f, 0xb7, 0xa9, 0x1e, 0x85, 0xdf, 0xad, 0x80, 0x78, 0xc9, + 0x65, 0x8c, 0x53, 0x84, 0x37, 0xa3, 0x9c, 0x73, 0x65, 0x9e, 0xaf, 0x49, 0xbf, 0x09, 0x19, 0x0f, + 0x82, 0x9a, 0x76, 0x0e, 0x79, 0x30, 0xed, 0x18, 0x9b, 0xc4, 0x89, 0x5e, 0xf5, 0xb8, 0x5e, 0xe2, + 0xd1, 0x09, 0x9e, 0xa7, 0x39, 0x48, 0xf9, 0x9a, 0x79, 0x21, 0x16, 0x64, 0x2e, 0x3f, 0x0f, 0x35, + 0xa5, 0xda, 0x89, 0xdc, 0x32, 0x7f, 0xa8, 0xc1, 0x5c, 0x22, 0xf1, 0x54, 0x1f, 0x26, 0x7c, 0x69, + 0x8e, 0x17, 0x3d, 0x66, 0x89, 0xa2, 0x81, 0x1e, 0x3b, 0xa2, 0x12, 0xa6, 0x74, 0x64, 0x8e, 0xaa, + 0xca, 0x69, 0xe5, 0xa8, 0xfa, 0xba, 0x06, 0x97, 0xa2, 0x0f, 0x4a, 0x26, 0x36, 0x40, 0x4f, 0x43, + 0xd5, 0x18, 0xd8, 0xcc, 0xf9, 0xaa, 0xfa, 0xaf, 0x57, 0x3b, 0x6d, 0x56, 0x86, 0x25, 0x14, 0x7d, + 0x04, 0xaa, 0x11, 0xeb, 0x09, 0xe5, 0x4b, 0xca, 0x2d, 0x79, 0x70, 0x24, 0x6b, 0xa0, 0xa7, 0x94, + 0xbc, 0x80, 0x53, 0xf1, 0x46, 0x2d, 0x09, 0xf3, 0xf3, 0x60, 0xfd, 0x6b, 0x15, 0x98, 0xe7, 0xce, + 0x8a, 0x86, 0xed, 0x5a, 0xb6, 0xdb, 0x3b, 0x83, 0x0d, 0x22, 0xf1, 0xc8, 0x60, 0xe5, 0xb4, 0x1e, + 0x19, 0xbc, 0x09, 0xd3, 0x6f, 0x51, 0x49, 0x15, 0x31, 0xf8, 0x58, 0x02, 0x43, 0x32, 0x2f, 0x13, + 0x72, 0x01, 0x16, 0x28, 0xf4, 0x3f, 0xd1, 0x60, 0x29, 0x31, 0x2c, 0x67, 0xb0, 0xd3, 0xf4, 0x92, + 0x3b, 0x4d, 0xa3, 0x60, 0x82, 0x0f, 0xa5, 0xd3, 0x39, 0x3b, 0xce, 0x6f, 0x56, 0x80, 0x3d, 0x69, + 0x76, 0x06, 0x53, 0xfd, 0x46, 0x42, 0x17, 0x78, 0xa9, 0xf8, 0xdb, 0x33, 0x79, 0x0e, 0x8d, 0x5e, + 0xca, 0xa1, 0xf1, 0xe9, 0xe2, 0x24, 0x8e, 0xf6, 0x66, 0xfc, 0x4a, 0x05, 0x80, 0x56, 0xe3, 0x8f, + 0xbf, 0x89, 0xb8, 0xc4, 0xf8, 0x31, 0xd0, 0xd9, 0x77, 0xcb, 0x0b, 0x9e, 0xba, 0x7c, 0x5f, 0x6c, + 0x22, 0x76, 0x9b, 0x27, 0xdf, 0x16, 0x4b, 0x2e, 0xc0, 0xc9, 0x53, 0x5a, 0x80, 0xfa, 0x6f, 0x6a, + 0xc0, 0x52, 0x8e, 0xb7, 0x6e, 0x77, 0xd1, 0x27, 0x60, 0xde, 0xe6, 0xe7, 0x92, 0x2d, 0x35, 0xa3, + 0x16, 0x3b, 0x3b, 0x69, 0xab, 0x00, 0x9c, 0xac, 0x87, 0x5c, 0x65, 0x5c, 0xcb, 0xbc, 0xbc, 0x28, + 0x3a, 0x22, 0x73, 0xea, 0xce, 0x65, 0xcf, 0x8c, 0xfe, 0xa3, 0x0a, 0x9c, 0x4b, 0xd5, 0x1d, 0xc3, + 0x54, 0x79, 0x38, 0x12, 0x4c, 0x49, 0xb3, 0x3b, 0x71, 0x06, 0x69, 0x76, 0x65, 0xc6, 0xdb, 0xc9, + 0x87, 0x9c, 0xf1, 0xf6, 0xbb, 0x1a, 0x54, 0xe9, 0x18, 0x9f, 0x81, 0x0c, 0xfd, 0xc9, 0xa4, 0x0c, + 0x7d, 0xbe, 0x30, 0xef, 0xe4, 0x88, 0xce, 0x3f, 0xd3, 0x80, 0xbd, 0x48, 0x21, 0x82, 0x31, 0x94, + 0xf8, 0x06, 0x2d, 0x27, 0xbe, 0xe1, 0x49, 0x11, 0x1e, 0x91, 0x72, 0xed, 0x29, 0x21, 0x12, 0x1f, + 0x51, 0x22, 0x20, 0x26, 0x92, 0xa2, 0x64, 0x34, 0x0a, 0x02, 0xbd, 0x0d, 0xf3, 0xc1, 0xb6, 0xe7, + 0x85, 0x91, 0x85, 0x2f, 0x66, 0xaf, 0x51, 0x3c, 0x94, 0x38, 0xfa, 0x16, 0xbe, 0x3a, 0xbb, 0x2a, + 0x72, 0x9c, 0xa4, 0xa5, 0xff, 0x9e, 0xf8, 0xfc, 0x13, 0x2c, 0x95, 0x33, 0x14, 0x7d, 0x1f, 
0x4a, + 0x89, 0xbe, 0xbc, 0xa7, 0x15, 0x7f, 0x4b, 0x7c, 0x85, 0x7c, 0xb0, 0xcc, 0x81, 0x79, 0x47, 0x7d, + 0x79, 0x44, 0x30, 0x66, 0xa1, 0x47, 0x4b, 0xc4, 0x7b, 0x9d, 0x4a, 0x11, 0x4e, 0x22, 0xa7, 0xb2, + 0x31, 0xea, 0xb8, 0xfa, 0x0e, 0x3b, 0x6b, 0xd8, 0x51, 0x01, 0x38, 0x59, 0x4f, 0x7f, 0x15, 0x3e, + 0xc8, 0xbb, 0xcd, 0xe2, 0xb7, 0xd7, 0xf6, 0x4c, 0x12, 0x04, 0x4d, 0x63, 0x60, 0x98, 0xd4, 0x06, + 0x61, 0x37, 0x20, 0xb9, 0x8b, 0xee, 0x99, 0x74, 0x06, 0x54, 0xe9, 0x17, 0x48, 0x67, 0x41, 0xd5, + 0xbf, 0x5c, 0x81, 0xba, 0x82, 0x33, 0x11, 0xc9, 0x12, 0x71, 0xdc, 0x37, 0x35, 0xa8, 0x19, 0xae, + 0xeb, 0x85, 0x86, 0x7a, 0x70, 0x44, 0x4a, 0xbc, 0x23, 0x97, 0x4b, 0x6b, 0x65, 0x35, 0xa6, 0x93, + 0x3a, 0x27, 0x56, 0x20, 0x58, 0xed, 0xce, 0xe5, 0x97, 0x61, 0x31, 0xdd, 0xea, 0x44, 0xc6, 0x46, + 0x03, 0x2e, 0x2a, 0xbd, 0x12, 0x17, 0xc9, 0xa8, 0x22, 0xfc, 0x0c, 0xcc, 0xec, 0xda, 0x81, 0x1d, + 0x5d, 0x4a, 0x56, 0x86, 0xf1, 0x35, 0x5e, 0x8c, 0x23, 0xb8, 0xfe, 0x0a, 0x9c, 0x57, 0x71, 0xb0, + 0x35, 0x73, 0xbb, 0x7b, 0x92, 0x89, 0x58, 0x87, 0x27, 0x15, 0x0c, 0x99, 0x57, 0xa9, 0x4e, 0x82, + 0xee, 0x67, 0xa7, 0x23, 0x16, 0x17, 0xb1, 0xfe, 0xdf, 0xd1, 0xe0, 0x51, 0x92, 0xc7, 0x31, 0x82, + 0xdf, 0x3f, 0x57, 0x76, 0x4a, 0x73, 0x59, 0x52, 0xa4, 0xae, 0xc9, 0x03, 0xe3, 0xfc, 0xae, 0xa1, + 0xfb, 0x00, 0x81, 0x9c, 0x93, 0x52, 0x21, 0xa0, 0x99, 0xb3, 0x2c, 0xf2, 0x95, 0xca, 0xdf, 0x58, + 0xa1, 0x86, 0x7c, 0xa8, 0x06, 0x62, 0x2e, 0x4b, 0xdd, 0xf8, 0xcc, 0xe0, 0x0d, 0x11, 0xe5, 0x26, + 0x7e, 0x61, 0x49, 0x07, 0xfd, 0x9a, 0x06, 0x17, 0x9c, 0x8c, 0xa5, 0x21, 0xe4, 0xfc, 0xdd, 0x87, + 0xb1, 0xec, 0xb8, 0xdb, 0x32, 0x0b, 0x82, 0x33, 0xfb, 0x82, 0xfe, 0x41, 0xee, 0xdd, 0x42, 0x1e, + 0x7d, 0xbd, 0x51, 0xb6, 0x97, 0xa7, 0x75, 0xcd, 0xf0, 0xdf, 0xcc, 0x70, 0xd5, 0x83, 0xf9, 0xd6, + 0x4c, 0x98, 0xde, 0x64, 0xda, 0xbb, 0xe0, 0xf7, 0xe2, 0xb6, 0x82, 0x78, 0xae, 0x9e, 0xe9, 0xd6, + 0xfc, 0x7f, 0x2c, 0x50, 0xa3, 0x15, 0x80, 0x4d, 0xc7, 0x33, 0x77, 0x9a, 0xed, 0x16, 0x8e, 0x44, + 0x3b, 0xe3, 0xb1, 0x86, 0x2c, 0xc5, 0x4a, 0x0d, 0xf4, 0x05, 0x98, 0xb0, 0xa4, 0xc2, 0xf7, 0x62, + 0x19, 0x5d, 0x37, 0x0e, 0xad, 0xa5, 0x1c, 0x45, 0xb1, 0x22, 0x0f, 0xaa, 0xae, 0xd8, 0xdd, 0x05, + 0xff, 0x14, 0x7f, 0xfe, 0x53, 0xaa, 0x09, 0x52, 0x3b, 0x89, 0x4a, 0xb0, 0x24, 0x42, 0x09, 0x4a, + 0xf5, 0x7d, 0xaa, 0x24, 0x41, 0xa9, 0xbb, 0x1f, 0x65, 0x59, 0x75, 0x54, 0x4d, 0x7c, 0x7a, 0x7c, + 0x4d, 0x7c, 0x3e, 0x57, 0x0b, 0xdf, 0x82, 0xe9, 0xd0, 0x60, 0x21, 0x1a, 0x33, 0x25, 0x22, 0x24, + 0xe8, 0x07, 0xdc, 0xa5, 0x68, 0x62, 0x2d, 0x84, 0xfd, 0x0c, 0xb0, 0xc0, 0x4e, 0xb9, 0x91, 0x3f, + 0x4d, 0x2a, 0x52, 0x47, 0x15, 0xe7, 0x46, 0xfe, 0xd6, 0x03, 0xe7, 0x46, 0xfe, 0x3f, 0x16, 0xa8, + 0xd1, 0x0e, 0xd5, 0x2d, 0xf9, 0x16, 0x20, 0xae, 0xfd, 0xae, 0x96, 0x5d, 0x9a, 0x41, 0x14, 0xa0, + 0xcb, 0x7f, 0x61, 0x49, 0x00, 0x99, 0x30, 0x23, 0x8c, 0x39, 0x91, 0x90, 0xf3, 0xc5, 0x32, 0x49, + 0x91, 0xa3, 0x27, 0x4c, 0xf8, 0x1d, 0xbf, 0x08, 0xb3, 0xfe, 0xdb, 0x13, 0xdc, 0x0e, 0x7f, 0x07, + 0x63, 0x5a, 0x7a, 0x50, 0x8d, 0x90, 0x95, 0x72, 0x6c, 0x44, 0x39, 0xf6, 0xf9, 0x80, 0xca, 0x8c, + 0xfb, 0x12, 0x39, 0x6a, 0x66, 0x5d, 0x07, 0x50, 0x92, 0x0d, 0x8d, 0x75, 0x15, 0x20, 0xfb, 0xf0, + 0x77, 0xb2, 0xd0, 0xe1, 0xef, 0x4b, 0x70, 0x2e, 0x8a, 0x8f, 0xb1, 0x08, 0xf3, 0x7d, 0x8b, 0x08, + 0x52, 0x76, 0x67, 0xad, 0x99, 0x04, 0xe1, 0x74, 0x5d, 0xfd, 0x26, 0xcc, 0xca, 0x75, 0x81, 0x9e, + 0x50, 0x74, 0xb4, 0x58, 0x74, 0xdd, 0x24, 0xfb, 0x5c, 0x61, 0xab, 0x27, 0x14, 0x36, 0x6e, 0x58, + 0xbd, 0x46, 0x0b, 0x84, 0xee, 0xa6, 0xff, 0x37, 0x8d, 0x33, 0x82, 0x78, 0x47, 0xc5, 0x80, 0x5a, + 0x9f, 0x27, 0x2f, 
0x62, 0x89, 0x33, 0x8b, 0xdd, 0x90, 0x62, 0x07, 0xeb, 0xeb, 0x31, 0x1a, 0xac, + 0xe2, 0x44, 0x7b, 0xa3, 0x8f, 0xfd, 0x5c, 0x2f, 0xb9, 0x68, 0xc7, 0x7e, 0xf3, 0x07, 0x8d, 0xb6, + 0x51, 0x1f, 0x56, 0xd1, 0x8e, 0x7e, 0x58, 0xe5, 0xf8, 0xf7, 0x21, 0xf4, 0x7f, 0xaa, 0x41, 0x66, + 0x0e, 0x60, 0xa4, 0xc3, 0x34, 0x0f, 0xee, 0x55, 0x9f, 0x41, 0xe2, 0x91, 0xbf, 0x58, 0x40, 0x90, + 0x0f, 0x17, 0x44, 0xe4, 0xec, 0x4d, 0xb2, 0x1f, 0x3f, 0xbf, 0x23, 0x56, 0xc7, 0xf8, 0xf1, 0x73, + 0x2c, 0xab, 0x49, 0x37, 0x85, 0x09, 0x67, 0xe2, 0x66, 0xb1, 0x9b, 0x4c, 0x7f, 0x7a, 0x8f, 0x1c, + 0x69, 0xb2, 0xbe, 0x3e, 0xe4, 0xd8, 0x4d, 0x4e, 0xe3, 0xf8, 0xd8, 0x4d, 0x56, 0xef, 0xbd, 0x72, + 0x78, 0xca, 0x3a, 0x9b, 0xe3, 0x8f, 0xf9, 0x7d, 0x0d, 0x96, 0x46, 0xee, 0x43, 0x8f, 0x71, 0xf2, + 0x76, 0x86, 0x5e, 0x89, 0xa7, 0xd2, 0x0f, 0xaf, 0xd4, 0x32, 0x13, 0x25, 0xbc, 0x0e, 0xf3, 0x09, + 0x6f, 0x8c, 0xbc, 0x57, 0xa3, 0x65, 0xde, 0xab, 0x51, 0xaf, 0xcd, 0x54, 0x8e, 0xbc, 0x36, 0xf3, + 0xbf, 0xe7, 0xc4, 0x7c, 0x33, 0x1d, 0xf8, 0x0d, 0x98, 0x66, 0x77, 0x5b, 0xa2, 0x97, 0xbc, 0x3e, + 0x55, 0xfc, 0xd2, 0x4c, 0xc0, 0x45, 0x01, 0xff, 0x1f, 0x0b, 0xb4, 0xa8, 0x05, 0x8b, 0xec, 0xb1, + 0xe7, 0x8e, 0xef, 0x6d, 0xd9, 0x0e, 0xb9, 0x1d, 0x4b, 0x1d, 0x79, 0xa1, 0xb6, 0x99, 0x82, 0xe3, + 0x91, 0x16, 0xa8, 0xab, 0x6a, 0xc5, 0x9f, 0x2c, 0xea, 0x99, 0xe4, 0x09, 0x8c, 0xa4, 0x36, 0xec, + 0x03, 0x90, 0x68, 0xea, 0xa2, 0x78, 0x8b, 0x97, 0x0b, 0x5e, 0x16, 0x96, 0x1c, 0x10, 0xc9, 0x0d, + 0x59, 0x14, 0x60, 0x85, 0x0a, 0x0a, 0xa0, 0xb6, 0x1d, 0xbf, 0xf5, 0x22, 0x74, 0xe2, 0x57, 0xca, + 0x3e, 0x34, 0xc3, 0x37, 0x2a, 0xa5, 0x00, 0xab, 0x54, 0x50, 0x90, 0x78, 0x67, 0xbb, 0x4c, 0x1a, + 0xfd, 0x58, 0xc3, 0x38, 0xee, 0x8d, 0x6d, 0x4a, 0xd4, 0x95, 0x57, 0xd4, 0x44, 0xc4, 0x5b, 0x31, + 0xa2, 0xf1, 0x4d, 0xb7, 0x98, 0x68, 0x5c, 0x86, 0x15, 0x32, 0x74, 0x78, 0xfb, 0xf1, 0xc5, 0x44, + 0xa1, 0x49, 0xbf, 0x52, 0xf6, 0x86, 0xa8, 0xd0, 0x03, 0xe2, 0x02, 0xac, 0x52, 0x41, 0x1e, 0x40, + 0x5f, 0x5e, 0x58, 0x14, 0x6a, 0x75, 0xb1, 0x2f, 0x8d, 0xef, 0x3d, 0x72, 0x1b, 0x31, 0xfe, 0x8d, + 0x15, 0x12, 0x54, 0x8b, 0x97, 0x56, 0x15, 0x94, 0x50, 0x38, 0xc7, 0xb2, 0xa8, 0x3e, 0x1e, 0x6b, + 0x15, 0x35, 0xb6, 0x6e, 0x1f, 0x53, 0x34, 0x8a, 0x07, 0x07, 0xf5, 0x39, 0x26, 0x4c, 0x46, 0x34, + 0x8c, 0xd8, 0xf9, 0x3a, 0x77, 0x94, 0xf3, 0x15, 0x5d, 0x87, 0xa5, 0x40, 0x3d, 0xa3, 0x64, 0x02, + 0x62, 0x9e, 0x35, 0x91, 0x57, 0x5c, 0xbb, 0xe9, 0x0a, 0x78, 0xb4, 0x0d, 0x97, 0x80, 0xc4, 0x62, + 0xed, 0x17, 0x54, 0x09, 0xc8, 0xcb, 0xb0, 0x84, 0xa2, 0x7b, 0xa9, 0x27, 0xa9, 0xcf, 0x95, 0x36, + 0x84, 0xc6, 0x7b, 0xa0, 0x1a, 0xfd, 0x94, 0xfa, 0x74, 0xd8, 0x62, 0x89, 0x10, 0x9c, 0x22, 0x6f, + 0x87, 0xa1, 0xdd, 0x64, 0x0c, 0xea, 0xd2, 0xe9, 0x84, 0xfc, 0x49, 0x87, 0x6c, 0x6e, 0xfc, 0xe9, + 0xbf, 0xa3, 0x8a, 0x79, 0xa4, 0x88, 0x90, 0x33, 0xd0, 0xce, 0x48, 0x42, 0x3b, 0x6b, 0x96, 0xd3, + 0x9c, 0x48, 0x6e, 0xd4, 0xd9, 0x1f, 0x69, 0xb0, 0x10, 0x57, 0x3b, 0x03, 0xed, 0xc9, 0x4a, 0x6a, + 0x4f, 0x9f, 0x2e, 0xf9, 0x61, 0x39, 0x2a, 0xd4, 0xff, 0xaf, 0xa8, 0x9f, 0xc5, 0x94, 0x84, 0x7b, + 0x8a, 0x81, 0xcb, 0x4d, 0xea, 0x76, 0x29, 0x03, 0x57, 0x8d, 0xe5, 0x89, 0xbf, 0x38, 0xc3, 0xe0, + 0xfd, 0x52, 0x62, 0x87, 0x2e, 0x13, 0xb4, 0x96, 0xfd, 0x06, 0xe1, 0xb1, 0xdb, 0xb5, 0xaf, 0xae, + 0x58, 0x1e, 0x47, 0xb2, 0x5a, 0x30, 0x50, 0x4a, 0xf9, 0xe4, 0xa3, 0xdf, 0xf8, 0xfb, 0xda, 0x2c, + 0xd4, 0x14, 0xc5, 0xfd, 0x1d, 0x71, 0x69, 0x0c, 0xa1, 0x66, 0x7a, 0x6e, 0x10, 0xfa, 0x86, 0x72, + 0x39, 0xa9, 0x2c, 0x51, 0x29, 0x2a, 0x9a, 0x31, 0x6a, 0xac, 0xd2, 0xa1, 0x1b, 0x9b, 0x64, 0xb4, + 0x89, 0xd3, 0xf0, 0xa4, 0x1c, 0xc5, 0x5c, 
0x1f, 0x03, 0x88, 0x94, 0x24, 0xf9, 0x04, 0xaa, 0xcc, + 0x00, 0xd7, 0x0e, 0x6e, 0x48, 0x18, 0x56, 0xea, 0xa1, 0xb7, 0x61, 0xde, 0x51, 0x73, 0x3f, 0x09, + 0x15, 0xae, 0xd8, 0x79, 0x6b, 0x22, 0x8b, 0x54, 0x74, 0x54, 0xa8, 0x14, 0xe1, 0x24, 0x2d, 0xb4, + 0x03, 0xb3, 0x4e, 0x94, 0xb0, 0x4c, 0xe8, 0x71, 0x2f, 0x17, 0x26, 0xcc, 0xb0, 0x70, 0xc7, 0xa7, + 0xfc, 0x89, 0x63, 0xfc, 0x94, 0xef, 0xe4, 0x8f, 0x72, 0xce, 0xcf, 0x98, 0x9a, 0xe4, 0x3b, 0x59, + 0x14, 0x60, 0x85, 0x4a, 0x8e, 0x73, 0xaa, 0x5a, 0xc8, 0x39, 0x35, 0x84, 0xf3, 0x3e, 0x09, 0xfd, + 0xfd, 0xe6, 0xbe, 0xc9, 0x5e, 0xd6, 0xf3, 0x43, 0x96, 0x9b, 0x6c, 0xb6, 0xd8, 0x25, 0x03, 0x3c, + 0x8a, 0x0a, 0x67, 0xe1, 0xa7, 0x76, 0x1a, 0xdd, 0xf4, 0xc5, 0x1d, 0x5f, 0x66, 0xa7, 0xb1, 0x87, + 0xf7, 0x59, 0x29, 0xfa, 0x38, 0xd4, 0x42, 0x62, 0x6e, 0xbb, 0xb6, 0x69, 0x38, 0xed, 0x96, 0xd0, + 0xa8, 0xe2, 0x3d, 0x34, 0x06, 0x61, 0xb5, 0x1e, 0x6a, 0xc0, 0xc4, 0xd0, 0xb6, 0x84, 0x2a, 0xf5, + 0xe3, 0xf2, 0x49, 0xdd, 0x76, 0xeb, 0xc1, 0x41, 0xfd, 0xfd, 0x71, 0x08, 0xad, 0xfc, 0x92, 0xab, + 0x83, 0x9d, 0xde, 0xd5, 0x70, 0x7f, 0x40, 0x82, 0x95, 0x8d, 0x76, 0x0b, 0xd3, 0xc6, 0x59, 0xce, + 0xba, 0xf9, 0x13, 0x38, 0xeb, 0x6e, 0x01, 0xc4, 0x5b, 0x7c, 0x69, 0x6f, 0xdd, 0x7f, 0x9e, 0x82, + 0x8b, 0x65, 0x4f, 0x35, 0x59, 0xde, 0x2e, 0xf6, 0xb4, 0xc0, 0xea, 0x56, 0x48, 0xfc, 0x3b, 0x77, + 0xd6, 0xef, 0x6e, 0xfb, 0x24, 0xd8, 0xf6, 0x1c, 0xab, 0x60, 0xe2, 0xb0, 0xf8, 0x71, 0x8a, 0x11, + 0x8c, 0x38, 0x87, 0x12, 0x6a, 0xc2, 0x52, 0xf4, 0xbc, 0x01, 0xa6, 0x5b, 0xca, 0xd0, 0x0f, 0x42, + 0x11, 0x65, 0xc9, 0x9c, 0xb2, 0x6b, 0x69, 0x20, 0x1e, 0xad, 0x9f, 0x46, 0xc2, 0x9f, 0x39, 0x98, + 0x64, 0x2f, 0x22, 0x8e, 0x20, 0xe1, 0x6f, 0x1d, 0x8c, 0xd6, 0x57, 0x91, 0xc8, 0x17, 0x14, 0x99, + 0x78, 0x4a, 0x21, 0x89, 0x9f, 0x57, 0x1c, 0xad, 0x8f, 0x2c, 0x78, 0xdc, 0x27, 0xa6, 0xd7, 0xef, + 0x13, 0xd7, 0xe2, 0x39, 0x1f, 0x0d, 0xbf, 0x67, 0xbb, 0xd7, 0x7c, 0x83, 0x55, 0x14, 0xcf, 0x34, + 0x3e, 0x79, 0x78, 0x50, 0x7f, 0x1c, 0x1f, 0x51, 0x0f, 0x1f, 0x89, 0x05, 0xf5, 0xe1, 0xdc, 0x90, + 0xa5, 0xb8, 0xf1, 0xdb, 0x6e, 0x48, 0xfc, 0x5d, 0xc3, 0x29, 0xf8, 0x7e, 0x07, 0xe3, 0xdd, 0x8d, + 0x24, 0x2a, 0x9c, 0xc6, 0x8d, 0xf6, 0xa9, 0x28, 0x10, 0xdd, 0x51, 0x48, 0x56, 0x8b, 0x67, 0xb6, + 0xc3, 0xa3, 0xe8, 0x70, 0x16, 0x0d, 0xfd, 0xef, 0x68, 0x20, 0x0e, 0x61, 0xe4, 0x73, 0xc6, 0x5a, + 0xe6, 0x73, 0xc6, 0xa7, 0xf3, 0xac, 0xf7, 0x5f, 0x88, 0xdf, 0x4a, 0xcf, 0x7c, 0xd4, 0x5b, 0xbe, + 0x6a, 0x7f, 0x0f, 0x44, 0x63, 0x96, 0x47, 0x6c, 0xac, 0x24, 0x54, 0xc7, 0x3f, 0x2a, 0x1c, 0xa7, + 0xc1, 0x9a, 0xc8, 0x4d, 0x83, 0xf5, 0x1d, 0x0d, 0xd2, 0x8f, 0xb9, 0xa1, 0xa7, 0x60, 0x46, 0xdc, + 0x4b, 0x12, 0x37, 0x1b, 0x78, 0x84, 0x1a, 0x2f, 0xc2, 0x11, 0x2c, 0x69, 0x3f, 0x95, 0xd1, 0x06, + 0xb3, 0x63, 0xaf, 0x8f, 0xd1, 0xcb, 0x0e, 0x16, 0x60, 0x9a, 0x5f, 0xa4, 0x41, 0x3f, 0x9d, 0x19, + 0x02, 0x73, 0xab, 0xc4, 0x5d, 0x9d, 0x02, 0x91, 0x2e, 0x89, 0x54, 0x27, 0x95, 0x23, 0x53, 0x9d, + 0x74, 0x79, 0x3e, 0xba, 0x32, 0xee, 0xb3, 0x26, 0x6e, 0x8b, 0x84, 0xe8, 0x22, 0x17, 0x1d, 0x1a, + 0x26, 0xbc, 0x4a, 0x93, 0x25, 0x72, 0xed, 0xf1, 0x21, 0x50, 0x7c, 0x4b, 0x0b, 0x47, 0xf8, 0x95, + 0xe2, 0x8b, 0x0b, 0x53, 0x25, 0x8e, 0x5c, 0xc4, 0xa8, 0x8f, 0x71, 0x71, 0x41, 0xb2, 0xfd, 0x74, + 0x2e, 0xdb, 0xf7, 0x60, 0x46, 0x08, 0x0e, 0x21, 0xc5, 0x5e, 0x2c, 0x93, 0x6d, 0x4e, 0xb9, 0x73, + 0xca, 0x0b, 0x70, 0x84, 0x9d, 0xee, 0x8d, 0x7d, 0x63, 0xcf, 0xee, 0x0f, 0xfb, 0x4c, 0x76, 0x4d, + 0xa9, 0x55, 0x59, 0x31, 0x8e, 0xe0, 0xac, 0x2a, 0x3f, 0xab, 0x62, 0x1a, 0x8f, 0x5a, 0x55, 0xbc, + 0xe9, 0x11, 0xc1, 0xd1, 0xe7, 0xa1, 0xda, 0x37, 0xf6, 0xba, 0x43, 
0xbf, 0x47, 0x84, 0x3b, 0x29, + 0xdf, 0x22, 0x1f, 0x86, 0xb6, 0xb3, 0x42, 0x75, 0xf5, 0xd0, 0x5f, 0x69, 0xbb, 0xe1, 0x1d, 0xbf, + 0x1b, 0xfa, 0x32, 0xab, 0xd6, 0xba, 0xc0, 0x82, 0x25, 0x3e, 0xe4, 0xc0, 0x42, 0xdf, 0xd8, 0xdb, + 0x70, 0x0d, 0xf9, 0xf4, 0x40, 0xad, 0x20, 0x05, 0xe6, 0x5b, 0x5f, 0x4f, 0xe0, 0xc2, 0x29, 0xdc, + 0x19, 0x6e, 0xfc, 0xb9, 0x87, 0xe5, 0xc6, 0x5f, 0x95, 0x61, 0x01, 0xf3, 0x8c, 0x0d, 0x1f, 0xcd, + 0x3a, 0xd4, 0x3a, 0xfa, 0xc4, 0xff, 0x0d, 0x79, 0xe2, 0xbf, 0x50, 0xc2, 0xf7, 0x7e, 0xc4, 0x69, + 0xff, 0x2e, 0xd4, 0x2c, 0x23, 0x34, 0x78, 0x69, 0xb0, 0x7c, 0xae, 0x84, 0xdb, 0xa0, 0x25, 0xf1, + 0x28, 0x09, 0x81, 0x63, 0xdc, 0x58, 0x25, 0x84, 0xee, 0xf0, 0xd4, 0xf4, 0x0e, 0x09, 0xe3, 0x2a, + 0xcc, 0x2f, 0xb7, 0xc8, 0xfd, 0x7a, 0x51, 0x22, 0xf9, 0x91, 0x0a, 0x38, 0xbb, 0x1d, 0x55, 0x27, + 0x79, 0x68, 0xf2, 0x52, 0x7c, 0xdb, 0x3d, 0x71, 0x9b, 0xe9, 0x6f, 0x69, 0xb0, 0xc8, 0x1f, 0x0d, + 0x6a, 0x7a, 0xfd, 0x81, 0xe7, 0x12, 0x3a, 0x31, 0x88, 0x8d, 0xea, 0xcd, 0x12, 0xf2, 0xa1, 0x9b, + 0x42, 0x29, 0x0e, 0x24, 0x53, 0xa5, 0x78, 0x84, 0x34, 0xfa, 0xe7, 0x1a, 0x2c, 0xf7, 0x73, 0xf2, + 0xed, 0x2e, 0x9f, 0x2f, 0x11, 0x13, 0x75, 0x5c, 0x12, 0x5f, 0xfe, 0xa0, 0xd9, 0x71, 0xb5, 0x70, + 0x6e, 0xe7, 0xca, 0x86, 0x4f, 0x96, 0xb9, 0xe6, 0xf5, 0x97, 0x61, 0x31, 0xbd, 0x13, 0xa8, 0xb9, + 0xf4, 0xb5, 0x87, 0x9b, 0x4b, 0x5f, 0x7f, 0x09, 0x2e, 0x65, 0xcf, 0x3a, 0xd5, 0x8e, 0xd8, 0xd3, + 0x14, 0xc2, 0x20, 0x89, 0x93, 0xc3, 0xd1, 0x42, 0xcc, 0x61, 0x8d, 0x95, 0xef, 0xfe, 0xe8, 0xca, + 0xfb, 0xbe, 0xff, 0xa3, 0x2b, 0xef, 0xfb, 0xc1, 0x8f, 0xae, 0xbc, 0xef, 0xcb, 0x87, 0x57, 0xb4, + 0xef, 0x1e, 0x5e, 0xd1, 0xbe, 0x7f, 0x78, 0x45, 0xfb, 0xc1, 0xe1, 0x15, 0xed, 0x3f, 0x1e, 0x5e, + 0xd1, 0xfe, 0xf6, 0x7f, 0xba, 0xf2, 0xbe, 0xcf, 0x57, 0xa3, 0x2e, 0xfd, 0x79, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xa6, 0x4b, 0xaa, 0xf1, 0x38, 0xaf, 0x00, 0x00, +} + +func (m *Addon) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Addon) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Addon) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if m.Enabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *Addons) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Addons) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Addons) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.NginxIngress != nil { + { + size, err := m.NginxIngress.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.KubernetesDashboard != nil { + { + size, err := m.KubernetesDashboard.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *AdmissionPlugin) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err 
!= nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AdmissionPlugin) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AdmissionPlugin) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Config != nil { + { + size, err := m.Config.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Alerting) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Alerting) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Alerting) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.EmailReceivers) > 0 { + for iNdEx := len(m.EmailReceivers) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.EmailReceivers[iNdEx]) + copy(dAtA[i:], m.EmailReceivers[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.EmailReceivers[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *AuditConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuditConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AuditConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.AuditPolicy != nil { + { + size, err := m.AuditPolicy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *AuditPolicy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuditPolicy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AuditPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ConfigMapRef != nil { + { + size, err := m.ConfigMapRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *AvailabilityZone) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AvailabilityZone) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AvailabilityZone) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.UnavailableVolumeTypes) > 0 { + for iNdEx := len(m.UnavailableVolumeTypes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.UnavailableVolumeTypes[iNdEx]) 
+ copy(dAtA[i:], m.UnavailableVolumeTypes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UnavailableVolumeTypes[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.UnavailableMachineTypes) > 0 { + for iNdEx := len(m.UnavailableMachineTypes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.UnavailableMachineTypes[iNdEx]) + copy(dAtA[i:], m.UnavailableMachineTypes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UnavailableMachineTypes[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *BackupBucket) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BackupBucket) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BackupBucket) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *BackupBucketList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BackupBucketList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BackupBucketList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *BackupBucketProvider) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BackupBucketProvider) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BackupBucketProvider) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Region) + copy(dAtA[i:], m.Region) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Region))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *BackupBucketSpec) Marshal() (dAtA []byte, err error) { + 
size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BackupBucketSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BackupBucketSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Seed != nil { + i -= len(*m.Seed) + copy(dAtA[i:], *m.Seed) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Seed))) + i-- + dAtA[i] = 0x22 + } + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.ProviderConfig != nil { + { + size, err := m.ProviderConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + { + size, err := m.Provider.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *BackupBucketStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BackupBucketStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BackupBucketStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.GeneratedSecretRef != nil { + { + size, err := m.GeneratedSecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x20 + if m.LastError != nil { + { + size, err := m.LastError.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.LastOperation != nil { + { + size, err := m.LastOperation.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.ProviderStatus != nil { + { + size, err := m.ProviderStatus.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *BackupEntry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BackupEntry) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BackupEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, 
uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *BackupEntryList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BackupEntryList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BackupEntryList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *BackupEntrySpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BackupEntrySpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BackupEntrySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Seed != nil { + i -= len(*m.Seed) + copy(dAtA[i:], *m.Seed) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Seed))) + i-- + dAtA[i] = 0x12 + } + i -= len(m.BucketName) + copy(dAtA[i:], m.BucketName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.BucketName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *BackupEntryStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BackupEntryStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BackupEntryStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x18 + if m.LastError != nil { + { + size, err := m.LastError.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.LastOperation != nil { + { + size, err := m.LastOperation.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CRI) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CRI) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CRI) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + 
_ = l + if len(m.ContainerRuntimes) > 0 { + for iNdEx := len(m.ContainerRuntimes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ContainerRuntimes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CloudInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CloudInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CloudInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Region) + copy(dAtA[i:], m.Region) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Region))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CloudProfile) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CloudProfile) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CloudProfile) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CloudProfileList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CloudProfileList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CloudProfileList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CloudProfileSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CloudProfileSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CloudProfileSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) 
+ _ = i + var l int + _ = l + if len(m.VolumeTypes) > 0 { + for iNdEx := len(m.VolumeTypes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.VolumeTypes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0x42 + if m.SeedSelector != nil { + { + size, err := m.SeedSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + if len(m.Regions) > 0 { + for iNdEx := len(m.Regions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Regions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + if m.ProviderConfig != nil { + { + size, err := m.ProviderConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if len(m.MachineTypes) > 0 { + for iNdEx := len(m.MachineTypes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.MachineTypes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.MachineImages) > 0 { + for iNdEx := len(m.MachineImages) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.MachineImages[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + { + size, err := m.Kubernetes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if m.CABundle != nil { + i -= len(*m.CABundle) + copy(dAtA[i:], *m.CABundle) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.CABundle))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ClusterAutoscaler) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterAutoscaler) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterAutoscaler) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ScanInterval != nil { + { + size, err := m.ScanInterval.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if m.ScaleDownUtilizationThreshold != nil { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.ScaleDownUtilizationThreshold)))) + i-- + dAtA[i] = 0x29 + } + if m.ScaleDownUnneededTime != nil { + { + size, err := m.ScaleDownUnneededTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.ScaleDownDelayAfterFailure != nil { + { + size, err := m.ScaleDownDelayAfterFailure.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.ScaleDownDelayAfterDelete != nil { + { + size, err := m.ScaleDownDelayAfterDelete.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.ScaleDownDelayAfterAdd != nil { + { + size, err := m.ScaleDownDelayAfterAdd.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ClusterInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Kubernetes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.Cloud.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Condition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Condition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Condition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Codes) > 0 { + for iNdEx := len(m.Codes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Codes[iNdEx]) + copy(dAtA[i:], m.Codes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Codes[iNdEx]))) + i-- + dAtA[i] = 0x3a + } + } + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x32 + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x2a + { + size, err := m.LastUpdateTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ContainerRuntime) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerRuntime) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerRuntime) MarshalToSizedBuffer(dAtA 
[]byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ProviderConfig != nil { + { + size, err := m.ProviderConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ControllerDeployment) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ControllerDeployment) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ControllerDeployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.SeedSelector != nil { + { + size, err := m.SeedSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Policy != nil { + i -= len(*m.Policy) + copy(dAtA[i:], *m.Policy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Policy))) + i-- + dAtA[i] = 0x1a + } + if m.ProviderConfig != nil { + { + size, err := m.ProviderConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ControllerInstallation) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ControllerInstallation) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ControllerInstallation) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ControllerInstallationList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ControllerInstallationList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ControllerInstallationList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ControllerInstallationSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ControllerInstallationSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ControllerInstallationSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.SeedRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.RegistrationRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ControllerInstallationStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ControllerInstallationStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ControllerInstallationStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ProviderStatus != nil { + { + size, err := m.ProviderStatus.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ControllerRegistration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ControllerRegistration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ControllerRegistration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ControllerRegistrationList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ControllerRegistrationList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() 
+ return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ControllerRegistrationList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ControllerRegistrationSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ControllerRegistrationSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ControllerRegistrationSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Deployment != nil { + { + size, err := m.Deployment.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Resources) > 0 { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Resources[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ControllerResource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ControllerResource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ControllerResource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Primary != nil { + i-- + if *m.Primary { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if m.ReconcileTimeout != nil { + { + size, err := m.ReconcileTimeout.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.GloballyEnabled != nil { + i-- + if *m.GloballyEnabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0x12 + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DNS) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DNS) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DNS) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Providers) > 0 { + for iNdEx := len(m.Providers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := 
m.Providers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if m.Domain != nil { + i -= len(*m.Domain) + copy(dAtA[i:], *m.Domain) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Domain))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DNSIncludeExclude) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DNSIncludeExclude) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DNSIncludeExclude) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Exclude) > 0 { + for iNdEx := len(m.Exclude) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Exclude[iNdEx]) + copy(dAtA[i:], m.Exclude[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Exclude[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Include) > 0 { + for iNdEx := len(m.Include) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Include[iNdEx]) + copy(dAtA[i:], m.Include[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Include[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *DNSProvider) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DNSProvider) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DNSProvider) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Zones != nil { + { + size, err := m.Zones.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.Type != nil { + i -= len(*m.Type) + copy(dAtA[i:], *m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Type))) + i-- + dAtA[i] = 0x22 + } + if m.SecretName != nil { + i -= len(*m.SecretName) + copy(dAtA[i:], *m.SecretName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SecretName))) + i-- + dAtA[i] = 0x1a + } + if m.Primary != nil { + i-- + if *m.Primary { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.Domains != nil { + { + size, err := m.Domains.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DataVolume) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DataVolume) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DataVolume) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Encrypted != nil { + i-- + if *m.Encrypted { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + i -= len(m.VolumeSize) + copy(dAtA[i:], m.VolumeSize) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeSize))) + i-- + dAtA[i] = 0x1a + if m.Type != nil { + i -= len(*m.Type) + 
copy(dAtA[i:], *m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Type))) + i-- + dAtA[i] = 0x12 + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Endpoint) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Endpoint) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Endpoint) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Purpose) + copy(dAtA[i:], m.Purpose) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Purpose))) + i-- + dAtA[i] = 0x1a + i -= len(m.URL) + copy(dAtA[i:], m.URL) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.URL))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ExpirableVersion) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExpirableVersion) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExpirableVersion) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Classification != nil { + i -= len(*m.Classification) + copy(dAtA[i:], *m.Classification) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Classification))) + i-- + dAtA[i] = 0x1a + } + if m.ExpirationDate != nil { + { + size, err := m.ExpirationDate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Extension) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Extension) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Extension) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Disabled != nil { + i-- + if *m.Disabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.ProviderConfig != nil { + { + size, err := m.ProviderConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ExtensionResourceState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExtensionResourceState) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExtensionResourceState) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Resources) > 0 { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Resources[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + if m.State != nil { + { + size, err := m.State.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Purpose != nil { + i -= len(*m.Purpose) + copy(dAtA[i:], *m.Purpose) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Purpose))) + i-- + dAtA[i] = 0x1a + } + if m.Name != nil { + i -= len(*m.Name) + copy(dAtA[i:], *m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Name))) + i-- + dAtA[i] = 0x12 + } + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Gardener) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Gardener) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Gardener) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x1a + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + i -= len(m.ID) + copy(dAtA[i:], m.ID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *GardenerResourceData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GardenerResourceData) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GardenerResourceData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Data.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Hibernation) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Hibernation) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Hibernation) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Schedules) > 0 { + for iNdEx := len(m.Schedules) - 1; iNdEx >= 0; iNdEx-- { + { + size, 
err := m.Schedules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if m.Enabled != nil { + i-- + if *m.Enabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *HibernationSchedule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HibernationSchedule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HibernationSchedule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Location != nil { + i -= len(*m.Location) + copy(dAtA[i:], *m.Location) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Location))) + i-- + dAtA[i] = 0x1a + } + if m.End != nil { + i -= len(*m.End) + copy(dAtA[i:], *m.End) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.End))) + i-- + dAtA[i] = 0x12 + } + if m.Start != nil { + i -= len(*m.Start) + copy(dAtA[i:], *m.Start) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Start))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HorizontalPodAutoscalerConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HorizontalPodAutoscalerConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscalerConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.UpscaleDelay != nil { + { + size, err := m.UpscaleDelay.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + if m.Tolerance != nil { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.Tolerance)))) + i-- + dAtA[i] = 0x31 + } + if m.SyncPeriod != nil { + { + size, err := m.SyncPeriod.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.InitialReadinessDelay != nil { + { + size, err := m.InitialReadinessDelay.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.DownscaleStabilization != nil { + { + size, err := m.DownscaleStabilization.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.DownscaleDelay != nil { + { + size, err := m.DownscaleDelay.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.CPUInitializationPeriod != nil { + { + size, err := m.CPUInitializationPeriod.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Ingress) Marshal() (dAtA []byte, err error) { + size := m.Size() + 
dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Ingress) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Ingress) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Controller.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Domain) + copy(dAtA[i:], m.Domain) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Domain))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *IngressController) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IngressController) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressController) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ProviderConfig != nil { + { + size, err := m.ProviderConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *KubeAPIServerConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KubeAPIServerConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *KubeAPIServerConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Requests != nil { + { + size, err := m.Requests.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + if m.WatchCacheSizes != nil { + { + size, err := m.WatchCacheSizes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + if m.ServiceAccountConfig != nil { + { + size, err := m.ServiceAccountConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + if len(m.RuntimeConfig) > 0 { + keysForRuntimeConfig := make([]string, 0, len(m.RuntimeConfig)) + for k := range m.RuntimeConfig { + keysForRuntimeConfig = append(keysForRuntimeConfig, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForRuntimeConfig) + for iNdEx := len(keysForRuntimeConfig) - 1; iNdEx >= 0; iNdEx-- { + v := m.RuntimeConfig[string(keysForRuntimeConfig[iNdEx])] + baseI := i + i-- + if v { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + i -= len(keysForRuntimeConfig[iNdEx]) + copy(dAtA[i:], keysForRuntimeConfig[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForRuntimeConfig[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x3a + } 
+ } + if m.OIDCConfig != nil { + { + size, err := m.OIDCConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if m.EnableBasicAuthentication != nil { + i-- + if *m.EnableBasicAuthentication { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if m.AuditConfig != nil { + { + size, err := m.AuditConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if len(m.APIAudiences) > 0 { + for iNdEx := len(m.APIAudiences) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.APIAudiences[iNdEx]) + copy(dAtA[i:], m.APIAudiences[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIAudiences[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.AdmissionPlugins) > 0 { + for iNdEx := len(m.AdmissionPlugins) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AdmissionPlugins[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.KubernetesConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *KubeAPIServerRequests) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KubeAPIServerRequests) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *KubeAPIServerRequests) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.MaxMutatingInflight != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxMutatingInflight)) + i-- + dAtA[i] = 0x10 + } + if m.MaxNonMutatingInflight != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxNonMutatingInflight)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *KubeControllerManagerConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KubeControllerManagerConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *KubeControllerManagerConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.PodEvictionTimeout != nil { + { + size, err := m.PodEvictionTimeout.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.NodeCIDRMaskSize != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.NodeCIDRMaskSize)) + i-- + dAtA[i] = 0x18 + } + if m.HorizontalPodAutoscalerConfig != nil { + { + size, err := m.HorizontalPodAutoscalerConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + { + size, err := m.KubernetesConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 
0xa + return len(dAtA) - i, nil +} + +func (m *KubeProxyConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KubeProxyConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *KubeProxyConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Mode != nil { + i -= len(*m.Mode) + copy(dAtA[i:], *m.Mode) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Mode))) + i-- + dAtA[i] = 0x12 + } + { + size, err := m.KubernetesConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *KubeSchedulerConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KubeSchedulerConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *KubeSchedulerConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.KubeMaxPDVols != nil { + i -= len(*m.KubeMaxPDVols) + copy(dAtA[i:], *m.KubeMaxPDVols) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.KubeMaxPDVols))) + i-- + dAtA[i] = 0x12 + } + { + size, err := m.KubernetesConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *KubeletConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KubeletConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *KubeletConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.SystemReserved != nil { + { + size, err := m.SystemReserved.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x7a + } + if m.KubeReserved != nil { + { + size, err := m.KubeReserved.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x72 + } + if m.FailSwapOn != nil { + i-- + if *m.FailSwapOn { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x68 + } + if m.ImagePullProgressDeadline != nil { + { + size, err := m.ImagePullProgressDeadline.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + } + if m.PodPIDsLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.PodPIDsLimit)) + i-- + dAtA[i] = 0x58 + } + if m.MaxPods != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxPods)) + i-- + dAtA[i] = 0x50 + } + if m.EvictionSoftGracePeriod != nil { + { + size, err := m.EvictionSoftGracePeriod.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, 
uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + if m.EvictionSoft != nil { + { + size, err := m.EvictionSoft.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + if m.EvictionPressureTransitionPeriod != nil { + { + size, err := m.EvictionPressureTransitionPeriod.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + if m.EvictionMinimumReclaim != nil { + { + size, err := m.EvictionMinimumReclaim.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if m.EvictionMaxPodGracePeriod != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.EvictionMaxPodGracePeriod)) + i-- + dAtA[i] = 0x28 + } + if m.EvictionHard != nil { + { + size, err := m.EvictionHard.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.CPUManagerPolicy != nil { + i -= len(*m.CPUManagerPolicy) + copy(dAtA[i:], *m.CPUManagerPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.CPUManagerPolicy))) + i-- + dAtA[i] = 0x1a + } + if m.CPUCFSQuota != nil { + i-- + if *m.CPUCFSQuota { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + { + size, err := m.KubernetesConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *KubeletConfigEviction) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KubeletConfigEviction) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *KubeletConfigEviction) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.NodeFSInodesFree != nil { + i -= len(*m.NodeFSInodesFree) + copy(dAtA[i:], *m.NodeFSInodesFree) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.NodeFSInodesFree))) + i-- + dAtA[i] = 0x2a + } + if m.NodeFSAvailable != nil { + i -= len(*m.NodeFSAvailable) + copy(dAtA[i:], *m.NodeFSAvailable) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.NodeFSAvailable))) + i-- + dAtA[i] = 0x22 + } + if m.ImageFSInodesFree != nil { + i -= len(*m.ImageFSInodesFree) + copy(dAtA[i:], *m.ImageFSInodesFree) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ImageFSInodesFree))) + i-- + dAtA[i] = 0x1a + } + if m.ImageFSAvailable != nil { + i -= len(*m.ImageFSAvailable) + copy(dAtA[i:], *m.ImageFSAvailable) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ImageFSAvailable))) + i-- + dAtA[i] = 0x12 + } + if m.MemoryAvailable != nil { + i -= len(*m.MemoryAvailable) + copy(dAtA[i:], *m.MemoryAvailable) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MemoryAvailable))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *KubeletConfigEvictionMinimumReclaim) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KubeletConfigEvictionMinimumReclaim) MarshalTo(dAtA 
[]byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *KubeletConfigEvictionMinimumReclaim) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.NodeFSInodesFree != nil { + { + size, err := m.NodeFSInodesFree.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.NodeFSAvailable != nil { + { + size, err := m.NodeFSAvailable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.ImageFSInodesFree != nil { + { + size, err := m.ImageFSInodesFree.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.ImageFSAvailable != nil { + { + size, err := m.ImageFSAvailable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.MemoryAvailable != nil { + { + size, err := m.MemoryAvailable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *KubeletConfigEvictionSoftGracePeriod) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KubeletConfigEvictionSoftGracePeriod) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *KubeletConfigEvictionSoftGracePeriod) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.NodeFSInodesFree != nil { + { + size, err := m.NodeFSInodesFree.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.NodeFSAvailable != nil { + { + size, err := m.NodeFSAvailable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.ImageFSInodesFree != nil { + { + size, err := m.ImageFSInodesFree.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.ImageFSAvailable != nil { + { + size, err := m.ImageFSAvailable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.MemoryAvailable != nil { + { + size, err := m.MemoryAvailable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *KubeletConfigReserved) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KubeletConfigReserved) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + 
+func (m *KubeletConfigReserved) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.PID != nil { + { + size, err := m.PID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.EphemeralStorage != nil { + { + size, err := m.EphemeralStorage.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Memory != nil { + { + size, err := m.Memory.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.CPU != nil { + { + size, err := m.CPU.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Kubernetes) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Kubernetes) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Kubernetes) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.VerticalPodAutoscaler != nil { + { + size, err := m.VerticalPodAutoscaler.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x42 + if m.Kubelet != nil { + { + size, err := m.Kubelet.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + if m.KubeProxy != nil { + { + size, err := m.KubeProxy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if m.KubeScheduler != nil { + { + size, err := m.KubeScheduler.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.KubeControllerManager != nil { + { + size, err := m.KubeControllerManager.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.KubeAPIServer != nil { + { + size, err := m.KubeAPIServer.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.ClusterAutoscaler != nil { + { + size, err := m.ClusterAutoscaler.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.AllowPrivilegedContainers != nil { + i-- + if *m.AllowPrivilegedContainers { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *KubernetesConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KubernetesConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *KubernetesConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.FeatureGates) > 0 { + keysForFeatureGates := make([]string, 0, len(m.FeatureGates)) + for k := range m.FeatureGates { + keysForFeatureGates = append(keysForFeatureGates, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForFeatureGates) + for iNdEx := len(keysForFeatureGates) - 1; iNdEx >= 0; iNdEx-- { + v := m.FeatureGates[string(keysForFeatureGates[iNdEx])] + baseI := i + i-- + if v { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + i -= len(keysForFeatureGates[iNdEx]) + copy(dAtA[i:], keysForFeatureGates[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForFeatureGates[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *KubernetesDashboard) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KubernetesDashboard) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *KubernetesDashboard) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Addon.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if m.AuthenticationMode != nil { + i -= len(*m.AuthenticationMode) + copy(dAtA[i:], *m.AuthenticationMode) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.AuthenticationMode))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *KubernetesInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KubernetesInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *KubernetesInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *KubernetesSettings) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KubernetesSettings) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *KubernetesSettings) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Versions) > 0 { + for iNdEx := len(m.Versions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Versions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *LastError) Marshal() 
(dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LastError) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LastError) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.LastUpdateTime != nil { + { + size, err := m.LastUpdateTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if len(m.Codes) > 0 { + for iNdEx := len(m.Codes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Codes[iNdEx]) + copy(dAtA[i:], m.Codes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Codes[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if m.TaskID != nil { + i -= len(*m.TaskID) + copy(dAtA[i:], *m.TaskID) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.TaskID))) + i-- + dAtA[i] = 0x12 + } + i -= len(m.Description) + copy(dAtA[i:], m.Description) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *LastOperation) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LastOperation) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LastOperation) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0x2a + i -= len(m.State) + copy(dAtA[i:], m.State) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.State))) + i-- + dAtA[i] = 0x22 + i = encodeVarintGenerated(dAtA, i, uint64(m.Progress)) + i-- + dAtA[i] = 0x18 + { + size, err := m.LastUpdateTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Description) + copy(dAtA[i:], m.Description) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Machine) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Machine) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Machine) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Image != nil { + { + size, err := m.Image.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *MachineControllerManagerSettings) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MachineControllerManagerSettings) MarshalTo(dAtA 
[]byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MachineControllerManagerSettings) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.NodeConditions) > 0 { + for iNdEx := len(m.NodeConditions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.NodeConditions[iNdEx]) + copy(dAtA[i:], m.NodeConditions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeConditions[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + if m.MaxEvictRetries != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxEvictRetries)) + i-- + dAtA[i] = 0x20 + } + if m.MachineCreationTimeout != nil { + { + size, err := m.MachineCreationTimeout.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.MachineHealthTimeout != nil { + { + size, err := m.MachineHealthTimeout.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.MachineDrainTimeout != nil { + { + size, err := m.MachineDrainTimeout.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MachineImage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MachineImage) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MachineImage) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Versions) > 0 { + for iNdEx := len(m.Versions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Versions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *MachineImageVersion) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MachineImageVersion) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MachineImageVersion) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.CRI) > 0 { + for iNdEx := len(m.CRI) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.CRI[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ExpirableVersion.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *MachineType) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*MachineType) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MachineType) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Usable != nil { + i-- + if *m.Usable { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if m.Storage != nil { + { + size, err := m.Storage.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x22 + { + size, err := m.Memory.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.GPU.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.CPU.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *MachineTypeStorage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MachineTypeStorage) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MachineTypeStorage) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0x1a + { + size, err := m.StorageSize.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Class) + copy(dAtA[i:], m.Class) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Class))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Maintenance) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Maintenance) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Maintenance) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ConfineSpecUpdateRollout != nil { + i-- + if *m.ConfineSpecUpdateRollout { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.TimeWindow != nil { + { + size, err := m.TimeWindow.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.AutoUpdate != nil { + { + size, err := m.AutoUpdate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MaintenanceAutoUpdate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + 
return dAtA[:n], nil +} + +func (m *MaintenanceAutoUpdate) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MaintenanceAutoUpdate) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if m.MachineImageVersion { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + i-- + if m.KubernetesVersion { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *MaintenanceTimeWindow) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MaintenanceTimeWindow) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MaintenanceTimeWindow) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.End) + copy(dAtA[i:], m.End) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.End))) + i-- + dAtA[i] = 0x12 + i -= len(m.Begin) + copy(dAtA[i:], m.Begin) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Begin))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Monitoring) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Monitoring) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Monitoring) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Alerting != nil { + { + size, err := m.Alerting.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *NamedResourceReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NamedResourceReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NamedResourceReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ResourceRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Networking) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Networking) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Networking) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Services != nil { + i -= len(*m.Services) + copy(dAtA[i:], *m.Services) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Services))) + i-- + dAtA[i] = 0x2a + } + if m.Nodes != nil { + i -= len(*m.Nodes) 
+ copy(dAtA[i:], *m.Nodes) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Nodes))) + i-- + dAtA[i] = 0x22 + } + if m.Pods != nil { + i -= len(*m.Pods) + copy(dAtA[i:], *m.Pods) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Pods))) + i-- + dAtA[i] = 0x1a + } + if m.ProviderConfig != nil { + { + size, err := m.ProviderConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *NginxIngress) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NginxIngress) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NginxIngress) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Addon.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + if m.ExternalTrafficPolicy != nil { + i -= len(*m.ExternalTrafficPolicy) + copy(dAtA[i:], *m.ExternalTrafficPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ExternalTrafficPolicy))) + i-- + dAtA[i] = 0x1a + } + if len(m.Config) > 0 { + keysForConfig := make([]string, 0, len(m.Config)) + for k := range m.Config { + keysForConfig = append(keysForConfig, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForConfig) + for iNdEx := len(keysForConfig) - 1; iNdEx >= 0; iNdEx-- { + v := m.Config[string(keysForConfig[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForConfig[iNdEx]) + copy(dAtA[i:], keysForConfig[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForConfig[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.LoadBalancerSourceRanges) > 0 { + for iNdEx := len(m.LoadBalancerSourceRanges) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.LoadBalancerSourceRanges[iNdEx]) + copy(dAtA[i:], m.LoadBalancerSourceRanges[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.LoadBalancerSourceRanges[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *OIDCConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OIDCConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *OIDCConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.UsernamePrefix != nil { + i -= len(*m.UsernamePrefix) + copy(dAtA[i:], *m.UsernamePrefix) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.UsernamePrefix))) + i-- + dAtA[i] = 0x52 + } + if m.UsernameClaim != nil { + i -= len(*m.UsernameClaim) + copy(dAtA[i:], *m.UsernameClaim) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.UsernameClaim))) + i-- + dAtA[i] = 0x4a + } + if len(m.SigningAlgs) > 0 { + for iNdEx := len(m.SigningAlgs) - 1; iNdEx >= 0; iNdEx-- { + i -= 
len(m.SigningAlgs[iNdEx]) + copy(dAtA[i:], m.SigningAlgs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SigningAlgs[iNdEx]))) + i-- + dAtA[i] = 0x42 + } + } + if len(m.RequiredClaims) > 0 { + keysForRequiredClaims := make([]string, 0, len(m.RequiredClaims)) + for k := range m.RequiredClaims { + keysForRequiredClaims = append(keysForRequiredClaims, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForRequiredClaims) + for iNdEx := len(keysForRequiredClaims) - 1; iNdEx >= 0; iNdEx-- { + v := m.RequiredClaims[string(keysForRequiredClaims[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForRequiredClaims[iNdEx]) + copy(dAtA[i:], keysForRequiredClaims[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForRequiredClaims[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x3a + } + } + if m.IssuerURL != nil { + i -= len(*m.IssuerURL) + copy(dAtA[i:], *m.IssuerURL) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.IssuerURL))) + i-- + dAtA[i] = 0x32 + } + if m.GroupsPrefix != nil { + i -= len(*m.GroupsPrefix) + copy(dAtA[i:], *m.GroupsPrefix) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.GroupsPrefix))) + i-- + dAtA[i] = 0x2a + } + if m.GroupsClaim != nil { + i -= len(*m.GroupsClaim) + copy(dAtA[i:], *m.GroupsClaim) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.GroupsClaim))) + i-- + dAtA[i] = 0x22 + } + if m.ClientID != nil { + i -= len(*m.ClientID) + copy(dAtA[i:], *m.ClientID) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ClientID))) + i-- + dAtA[i] = 0x1a + } + if m.ClientAuthentication != nil { + { + size, err := m.ClientAuthentication.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.CABundle != nil { + i -= len(*m.CABundle) + copy(dAtA[i:], *m.CABundle) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.CABundle))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *OpenIDConnectClientAuthentication) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OpenIDConnectClientAuthentication) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *OpenIDConnectClientAuthentication) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Secret != nil { + i -= len(*m.Secret) + copy(dAtA[i:], *m.Secret) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Secret))) + i-- + dAtA[i] = 0x12 + } + if len(m.ExtraConfig) > 0 { + keysForExtraConfig := make([]string, 0, len(m.ExtraConfig)) + for k := range m.ExtraConfig { + keysForExtraConfig = append(keysForExtraConfig, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForExtraConfig) + for iNdEx := len(keysForExtraConfig) - 1; iNdEx >= 0; iNdEx-- { + v := m.ExtraConfig[string(keysForExtraConfig[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForExtraConfig[iNdEx]) + copy(dAtA[i:], keysForExtraConfig[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForExtraConfig[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) 
+ i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Plant) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Plant) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Plant) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PlantList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PlantList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PlantList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PlantSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PlantSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PlantSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Endpoints) > 0 { + for iNdEx := len(m.Endpoints) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Endpoints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PlantStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PlantStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PlantStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ClusterInfo != nil { + { 
+ size, err := m.ClusterInfo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.ObservedGeneration != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ObservedGeneration)) + i-- + dAtA[i] = 0x10 + } + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Project) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Project) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Project) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ProjectList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProjectList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ProjectList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ProjectMember) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProjectMember) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ProjectMember) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Roles) > 0 { + for iNdEx := len(m.Roles) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Roles[iNdEx]) + copy(dAtA[i:], m.Roles[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Roles[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.Role) + copy(dAtA[i:], m.Role) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Role))) + i-- + dAtA[i] = 0x12 + { + size, err := 
m.Subject.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ProjectSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProjectSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ProjectSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Tolerations != nil { + { + size, err := m.Tolerations.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + if m.Namespace != nil { + i -= len(*m.Namespace) + copy(dAtA[i:], *m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Namespace))) + i-- + dAtA[i] = 0x32 + } + if len(m.Members) > 0 { + for iNdEx := len(m.Members) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Members[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + if m.Purpose != nil { + i -= len(*m.Purpose) + copy(dAtA[i:], *m.Purpose) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Purpose))) + i-- + dAtA[i] = 0x22 + } + if m.Owner != nil { + { + size, err := m.Owner.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Description != nil { + i -= len(*m.Description) + copy(dAtA[i:], *m.Description) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Description))) + i-- + dAtA[i] = 0x12 + } + if m.CreatedBy != nil { + { + size, err := m.CreatedBy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ProjectStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProjectStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ProjectStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.StaleAutoDeleteTimestamp != nil { + { + size, err := m.StaleAutoDeleteTimestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.StaleSinceTimestamp != nil { + { + size, err := m.StaleSinceTimestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + i -= len(m.Phase) + copy(dAtA[i:], m.Phase) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) + i-- + dAtA[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *ProjectTolerations) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != 
nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProjectTolerations) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ProjectTolerations) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Whitelist) > 0 { + for iNdEx := len(m.Whitelist) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Whitelist[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Defaults) > 0 { + for iNdEx := len(m.Defaults) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Defaults[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Provider) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Provider) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Provider) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Workers) > 0 { + for iNdEx := len(m.Workers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Workers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if m.InfrastructureConfig != nil { + { + size, err := m.InfrastructureConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.ControlPlaneConfig != nil { + { + size, err := m.ControlPlaneConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Quota) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Quota) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Quota) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *QuotaList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QuotaList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QuotaList) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *QuotaSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QuotaSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QuotaSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Scope.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if len(m.Metrics) > 0 { + keysForMetrics := make([]string, 0, len(m.Metrics)) + for k := range m.Metrics { + keysForMetrics = append(keysForMetrics, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMetrics) + for iNdEx := len(keysForMetrics) - 1; iNdEx >= 0; iNdEx-- { + v := m.Metrics[k8s_io_api_core_v1.ResourceName(keysForMetrics[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForMetrics[iNdEx]) + copy(dAtA[i:], keysForMetrics[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForMetrics[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if m.ClusterLifetimeDays != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ClusterLifetimeDays)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Region) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Region) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Region) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Labels) > 0 { + keysForLabels := make([]string, 0, len(m.Labels)) + for k := range m.Labels { + keysForLabels = append(keysForLabels, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + for iNdEx := len(keysForLabels) - 1; iNdEx >= 0; iNdEx-- { + v := m.Labels[string(keysForLabels[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForLabels[iNdEx]) + copy(dAtA[i:], keysForLabels[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForLabels[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Zones) > 0 { + for iNdEx := len(m.Zones) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Zones[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + 
} + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceData) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Data.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.CrossVersionObjectReference.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceWatchCacheSize) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceWatchCacheSize) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceWatchCacheSize) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.CacheSize)) + i-- + dAtA[i] = 0x18 + i -= len(m.Resource) + copy(dAtA[i:], m.Resource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + i-- + dAtA[i] = 0x12 + if m.APIGroup != nil { + i -= len(*m.APIGroup) + copy(dAtA[i:], *m.APIGroup) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.APIGroup))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SecretBinding) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SecretBinding) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SecretBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Quotas) > 0 { + for iNdEx := len(m.Quotas) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Quotas[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SecretBindingList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SecretBindingList) MarshalTo(dAtA []byte) (int, error) { + size := 
m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SecretBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Seed) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Seed) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Seed) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SeedBackup) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SeedBackup) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SeedBackup) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + if m.Region != nil { + i -= len(*m.Region) + copy(dAtA[i:], *m.Region) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Region))) + i-- + dAtA[i] = 0x1a + } + if m.ProviderConfig != nil { + { + size, err := m.ProviderConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Provider) + copy(dAtA[i:], m.Provider) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Provider))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SeedDNS) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SeedDNS) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SeedDNS) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Provider != nil { + { + size, err := m.Provider.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.IngressDomain != nil { + i -= len(*m.IngressDomain) + copy(dAtA[i:], *m.IngressDomain) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.IngressDomain))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SeedDNSProvider) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SeedDNSProvider) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SeedDNSProvider) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Zones != nil { + { + size, err := m.Zones.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Domains != nil { + { + size, err := m.Domains.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SeedList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SeedList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SeedList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SeedNetworks) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SeedNetworks) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SeedNetworks) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ShootDefaults != nil { + { + size, err := m.ShootDefaults.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + i -= len(m.Services) + copy(dAtA[i:], m.Services) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Services))) + i-- + dAtA[i] = 0x1a + i -= len(m.Pods) + copy(dAtA[i:], m.Pods) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Pods))) + i-- + dAtA[i] = 0x12 + if m.Nodes != nil { + i -= len(*m.Nodes) + copy(dAtA[i:], *m.Nodes) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(*m.Nodes))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SeedProvider) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SeedProvider) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SeedProvider) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Region) + copy(dAtA[i:], m.Region) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Region))) + i-- + dAtA[i] = 0x1a + if m.ProviderConfig != nil { + { + size, err := m.ProviderConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SeedSelector) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SeedSelector) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SeedSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ProviderTypes) > 0 { + for iNdEx := len(m.ProviderTypes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ProviderTypes[iNdEx]) + copy(dAtA[i:], m.ProviderTypes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ProviderTypes[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if m.LabelSelector != nil { + { + size, err := m.LabelSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SeedSettingExcessCapacityReservation) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SeedSettingExcessCapacityReservation) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SeedSettingExcessCapacityReservation) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if m.Enabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *SeedSettingLoadBalancerServices) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SeedSettingLoadBalancerServices) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SeedSettingLoadBalancerServices) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Annotations) > 0 { + keysForAnnotations := make([]string, 0, len(m.Annotations)) + for k := range m.Annotations { + keysForAnnotations = append(keysForAnnotations, string(k)) + } + 
github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + for iNdEx := len(keysForAnnotations) - 1; iNdEx >= 0; iNdEx-- { + v := m.Annotations[string(keysForAnnotations[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForAnnotations[iNdEx]) + copy(dAtA[i:], keysForAnnotations[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAnnotations[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *SeedSettingScheduling) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SeedSettingScheduling) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SeedSettingScheduling) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if m.Visible { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *SeedSettingShootDNS) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SeedSettingShootDNS) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SeedSettingShootDNS) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if m.Enabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *SeedSettingVerticalPodAutoscaler) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SeedSettingVerticalPodAutoscaler) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SeedSettingVerticalPodAutoscaler) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if m.Enabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *SeedSettings) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SeedSettings) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SeedSettings) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.VerticalPodAutoscaler != nil { + { + size, err := m.VerticalPodAutoscaler.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.LoadBalancerServices != nil { + { + size, err := m.LoadBalancerServices.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.ShootDNS != nil { + { + size, err := m.ShootDNS.MarshalToSizedBuffer(dAtA[:i]) + if err != 
nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Scheduling != nil { + { + size, err := m.Scheduling.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.ExcessCapacityReservation != nil { + { + size, err := m.ExcessCapacityReservation.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SeedSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SeedSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SeedSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Ingress != nil { + { + size, err := m.Ingress.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + if m.Settings != nil { + { + size, err := m.Settings.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + if m.Volume != nil { + { + size, err := m.Volume.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + if len(m.Taints) > 0 { + for iNdEx := len(m.Taints) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Taints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + } + if m.SecretRef != nil { + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + { + size, err := m.Provider.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + { + size, err := m.Networks.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + { + size, err := m.DNS.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if len(m.BlockCIDRs) > 0 { + for iNdEx := len(m.BlockCIDRs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.BlockCIDRs[iNdEx]) + copy(dAtA[i:], m.BlockCIDRs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.BlockCIDRs[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if m.Backup != nil { + { + size, err := m.Backup.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SeedStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SeedStatus) 
MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SeedStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ClusterIdentity != nil { + i -= len(*m.ClusterIdentity) + copy(dAtA[i:], *m.ClusterIdentity) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ClusterIdentity))) + i-- + dAtA[i] = 0x2a + } + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x20 + if m.KubernetesVersion != nil { + i -= len(*m.KubernetesVersion) + copy(dAtA[i:], *m.KubernetesVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.KubernetesVersion))) + i-- + dAtA[i] = 0x1a + } + if m.Gardener != nil { + { + size, err := m.Gardener.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *SeedTaint) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SeedTaint) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SeedTaint) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Value != nil { + i -= len(*m.Value) + copy(dAtA[i:], *m.Value) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Value))) + i-- + dAtA[i] = 0x12 + } + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SeedVolume) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SeedVolume) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SeedVolume) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Providers) > 0 { + for iNdEx := len(m.Providers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Providers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if m.MinimumSize != nil { + { + size, err := m.MinimumSize.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SeedVolumeProvider) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SeedVolumeProvider) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SeedVolumeProvider) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= 
len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + i -= len(m.Purpose) + copy(dAtA[i:], m.Purpose) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Purpose))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ServiceAccountConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceAccountConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceAccountConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.SigningKeySecret != nil { + { + size, err := m.SigningKeySecret.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Issuer != nil { + i -= len(*m.Issuer) + copy(dAtA[i:], *m.Issuer) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Issuer))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Shoot) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Shoot) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Shoot) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ShootList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ShootList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ShootList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ShootMachineImage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ShootMachineImage) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ShootMachineImage) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Version != nil { + i -= len(*m.Version) + copy(dAtA[i:], *m.Version) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Version))) + i-- + dAtA[i] = 0x1a + } + if m.ProviderConfig != nil { + { + size, err := m.ProviderConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ShootNetworks) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ShootNetworks) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ShootNetworks) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Services != nil { + i -= len(*m.Services) + copy(dAtA[i:], *m.Services) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Services))) + i-- + dAtA[i] = 0x12 + } + if m.Pods != nil { + i -= len(*m.Pods) + copy(dAtA[i:], *m.Pods) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Pods))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ShootSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ShootSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ShootSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Tolerations) > 0 { + for iNdEx := len(m.Tolerations) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Tolerations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + } + if len(m.Resources) > 0 { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Resources[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + } + if m.SeedSelector != nil { + { + size, err := m.SeedSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x7a + } + if m.SeedName != nil { + i -= len(*m.SeedName) + copy(dAtA[i:], *m.SeedName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SeedName))) + i-- + dAtA[i] = 0x72 + } + i -= len(m.SecretBindingName) + copy(dAtA[i:], m.SecretBindingName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretBindingName))) + i-- + dAtA[i] = 0x6a + i -= len(m.Region) + copy(dAtA[i:], m.Region) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Region))) + i-- + dAtA[i] = 0x62 + if m.Purpose != nil { + i -= len(*m.Purpose) + copy(dAtA[i:], *m.Purpose) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Purpose))) + i-- + dAtA[i] = 0x5a + } + { + size, err := 
m.Provider.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + if m.Monitoring != nil { + { + size, err := m.Monitoring.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + if m.Maintenance != nil { + { + size, err := m.Maintenance.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + { + size, err := m.Networking.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + { + size, err := m.Kubernetes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + if m.Hibernation != nil { + { + size, err := m.Hibernation.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if len(m.Extensions) > 0 { + for iNdEx := len(m.Extensions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Extensions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if m.DNS != nil { + { + size, err := m.DNS.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + i -= len(m.CloudProfileName) + copy(dAtA[i:], m.CloudProfileName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CloudProfileName))) + i-- + dAtA[i] = 0x12 + if m.Addons != nil { + { + size, err := m.Addons.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ShootState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ShootState) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ShootState) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ShootStateList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ShootStateList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ShootStateList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; 
iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ShootStateSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ShootStateSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ShootStateSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Resources) > 0 { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Resources[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Extensions) > 0 { + for iNdEx := len(m.Extensions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Extensions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Gardener) > 0 { + for iNdEx := len(m.Gardener) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Gardener[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ShootStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ShootStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ShootStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ClusterIdentity != nil { + i -= len(*m.ClusterIdentity) + copy(dAtA[i:], *m.ClusterIdentity) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ClusterIdentity))) + i-- + dAtA[i] = 0x6a + } + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x62 + i -= len(m.TechnicalID) + copy(dAtA[i:], m.TechnicalID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TechnicalID))) + i-- + dAtA[i] = 0x5a + if m.Seed != nil { + i -= len(*m.Seed) + copy(dAtA[i:], *m.Seed) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Seed))) + i-- + dAtA[i] = 0x52 + } + if m.RetryCycleStartTime != nil { + { + size, err := m.RetryCycleStartTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x40 + if len(m.LastErrors) > 0 { + for iNdEx := len(m.LastErrors) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.LastErrors[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + } + if m.LastError != 
nil { + { + size, err := m.LastError.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if m.LastOperation != nil { + { + size, err := m.LastOperation.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + i-- + if m.IsHibernated { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + { + size, err := m.Gardener.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if len(m.Constraints) > 0 { + for iNdEx := len(m.Constraints) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Constraints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Toleration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Toleration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Toleration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Value != nil { + i -= len(*m.Value) + copy(dAtA[i:], *m.Value) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Value))) + i-- + dAtA[i] = 0x12 + } + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *VerticalPodAutoscaler) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VerticalPodAutoscaler) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VerticalPodAutoscaler) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.RecommenderInterval != nil { + { + size, err := m.RecommenderInterval.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + if m.UpdaterInterval != nil { + { + size, err := m.UpdaterInterval.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + if m.RecommendationMarginFraction != nil { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.RecommendationMarginFraction)))) + i-- + dAtA[i] = 0x31 + } + if m.EvictionTolerance != nil { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.EvictionTolerance)))) + i-- + dAtA[i] = 0x29 + } + if m.EvictionRateLimit != nil { + i -= 8 + 
encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.EvictionRateLimit)))) + i-- + dAtA[i] = 0x21 + } + if m.EvictionRateBurst != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.EvictionRateBurst)) + i-- + dAtA[i] = 0x18 + } + if m.EvictAfterOOMThreshold != nil { + { + size, err := m.EvictAfterOOMThreshold.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i-- + if m.Enabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *Volume) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Volume) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Volume) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Encrypted != nil { + i-- + if *m.Encrypted { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + i -= len(m.VolumeSize) + copy(dAtA[i:], m.VolumeSize) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeSize))) + i-- + dAtA[i] = 0x1a + if m.Type != nil { + i -= len(*m.Type) + copy(dAtA[i:], *m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Type))) + i-- + dAtA[i] = 0x12 + } + if m.Name != nil { + i -= len(*m.Name) + copy(dAtA[i:], *m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *VolumeType) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeType) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeType) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Usable != nil { + i-- + if *m.Usable { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + i -= len(m.Class) + copy(dAtA[i:], m.Class) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Class))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *WatchCacheSizes) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WatchCacheSizes) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WatchCacheSizes) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Resources) > 0 { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Resources[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if m.Default != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Default)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Worker) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = 
make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Worker) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Worker) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.MachineControllerManagerSettings != nil { + { + size, err := m.MachineControllerManagerSettings.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a + } + if m.SystemComponents != nil { + { + size, err := m.SystemComponents.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } + if len(m.Zones) > 0 { + for iNdEx := len(m.Zones) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Zones[iNdEx]) + copy(dAtA[i:], m.Zones[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Zones[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + } + if m.KubeletDataVolumeName != nil { + i -= len(*m.KubeletDataVolumeName) + copy(dAtA[i:], *m.KubeletDataVolumeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.KubeletDataVolumeName))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + if len(m.DataVolumes) > 0 { + for iNdEx := len(m.DataVolumes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.DataVolumes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x7a + } + } + if m.Volume != nil { + { + size, err := m.Volume.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x72 + } + if len(m.Taints) > 0 { + for iNdEx := len(m.Taints) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Taints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a + } + } + if m.ProviderConfig != nil { + { + size, err := m.ProviderConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + } + if m.MaxUnavailable != nil { + { + size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } + if m.MaxSurge != nil { + { + size, err := m.MaxSurge.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.Minimum)) + i-- + dAtA[i] = 0x48 + i = encodeVarintGenerated(dAtA, i, uint64(m.Maximum)) + i-- + dAtA[i] = 0x40 + { + size, err := m.Machine.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x32 + if len(m.Labels) > 0 { + keysForLabels := make([]string, 0, len(m.Labels)) + for k := range m.Labels { + keysForLabels = append(keysForLabels, string(k)) + } + 
github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + for iNdEx := len(keysForLabels) - 1; iNdEx >= 0; iNdEx-- { + v := m.Labels[string(keysForLabels[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForLabels[iNdEx]) + copy(dAtA[i:], keysForLabels[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForLabels[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a + } + } + if m.Kubernetes != nil { + { + size, err := m.Kubernetes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.CRI != nil { + { + size, err := m.CRI.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.CABundle != nil { + i -= len(*m.CABundle) + copy(dAtA[i:], *m.CABundle) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.CABundle))) + i-- + dAtA[i] = 0x12 + } + if len(m.Annotations) > 0 { + keysForAnnotations := make([]string, 0, len(m.Annotations)) + for k := range m.Annotations { + keysForAnnotations = append(keysForAnnotations, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + for iNdEx := len(keysForAnnotations) - 1; iNdEx >= 0; iNdEx-- { + v := m.Annotations[string(keysForAnnotations[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForAnnotations[iNdEx]) + copy(dAtA[i:], keysForAnnotations[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAnnotations[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *WorkerKubernetes) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkerKubernetes) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WorkerKubernetes) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Kubelet != nil { + { + size, err := m.Kubelet.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *WorkerSystemComponents) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkerSystemComponents) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WorkerSystemComponents) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if m.Allow { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Addon) Size() (n 
int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + return n +} + +func (m *Addons) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.KubernetesDashboard != nil { + l = m.KubernetesDashboard.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NginxIngress != nil { + l = m.NginxIngress.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *AdmissionPlugin) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.Config != nil { + l = m.Config.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *Alerting) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.EmailReceivers) > 0 { + for _, s := range m.EmailReceivers { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *AuditConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.AuditPolicy != nil { + l = m.AuditPolicy.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *AuditPolicy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ConfigMapRef != nil { + l = m.ConfigMapRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *AvailabilityZone) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.UnavailableMachineTypes) > 0 { + for _, s := range m.UnavailableMachineTypes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.UnavailableVolumeTypes) > 0 { + for _, s := range m.UnavailableVolumeTypes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *BackupBucket) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *BackupBucketList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *BackupBucketProvider) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Region) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *BackupBucketSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Provider.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.ProviderConfig != nil { + l = m.ProviderConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Seed != nil { + l = len(*m.Seed) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *BackupBucketStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ProviderStatus != nil { + l = m.ProviderStatus.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.LastOperation != nil { + l = m.LastOperation.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.LastError != nil { + l = m.LastError.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + if m.GeneratedSecretRef != nil { + l = m.GeneratedSecretRef.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + } + return n +} + +func (m *BackupEntry) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *BackupEntryList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *BackupEntrySpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.BucketName) + n += 1 + l + sovGenerated(uint64(l)) + if m.Seed != nil { + l = len(*m.Seed) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *BackupEntryStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LastOperation != nil { + l = m.LastOperation.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.LastError != nil { + l = m.LastError.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + return n +} + +func (m *CRI) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.ContainerRuntimes) > 0 { + for _, e := range m.ContainerRuntimes { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *CloudInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Region) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *CloudProfile) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *CloudProfileList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *CloudProfileSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CABundle != nil { + l = len(*m.CABundle) + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Kubernetes.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.MachineImages) > 0 { + for _, e := range m.MachineImages { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.MachineTypes) > 0 { + for _, e := range m.MachineTypes { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.ProviderConfig != nil { + l = m.ProviderConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Regions) > 0 { + for _, e := range m.Regions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.SeedSelector != nil { + l = m.SeedSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.VolumeTypes) > 0 { + for _, e := range m.VolumeTypes { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ClusterAutoscaler) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ScaleDownDelayAfterAdd != nil { + l = m.ScaleDownDelayAfterAdd.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if 
m.ScaleDownDelayAfterDelete != nil { + l = m.ScaleDownDelayAfterDelete.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ScaleDownDelayAfterFailure != nil { + l = m.ScaleDownDelayAfterFailure.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ScaleDownUnneededTime != nil { + l = m.ScaleDownUnneededTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ScaleDownUtilizationThreshold != nil { + n += 9 + } + if m.ScanInterval != nil { + l = m.ScanInterval.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ClusterInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Cloud.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Kubernetes.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Condition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastUpdateTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Codes) > 0 { + for _, s := range m.Codes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ContainerRuntime) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.ProviderConfig != nil { + l = m.ProviderConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ControllerDeployment) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.ProviderConfig != nil { + l = m.ProviderConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Policy != nil { + l = len(*m.Policy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.SeedSelector != nil { + l = m.SeedSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ControllerInstallation) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ControllerInstallationList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ControllerInstallationSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.RegistrationRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.SeedRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ControllerInstallationStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.ProviderStatus != nil { + l = m.ProviderStatus.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ControllerRegistration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return 
n +} + +func (m *ControllerRegistrationList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ControllerRegistrationSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Resources) > 0 { + for _, e := range m.Resources { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Deployment != nil { + l = m.Deployment.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ControllerResource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.GloballyEnabled != nil { + n += 2 + } + if m.ReconcileTimeout != nil { + l = m.ReconcileTimeout.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Primary != nil { + n += 2 + } + return n +} + +func (m *DNS) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Domain != nil { + l = len(*m.Domain) + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Providers) > 0 { + for _, e := range m.Providers { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DNSIncludeExclude) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Include) > 0 { + for _, s := range m.Include { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Exclude) > 0 { + for _, s := range m.Exclude { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DNSProvider) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Domains != nil { + l = m.Domains.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Primary != nil { + n += 2 + } + if m.SecretName != nil { + l = len(*m.SecretName) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Type != nil { + l = len(*m.Type) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Zones != nil { + l = m.Zones.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *DataVolume) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.Type != nil { + l = len(*m.Type) + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.VolumeSize) + n += 1 + l + sovGenerated(uint64(l)) + if m.Encrypted != nil { + n += 2 + } + return n +} + +func (m *Endpoint) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.URL) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Purpose) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ExpirableVersion) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Version) + n += 1 + l + sovGenerated(uint64(l)) + if m.ExpirationDate != nil { + l = m.ExpirationDate.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Classification != nil { + l = len(*m.Classification) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *Extension) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.ProviderConfig != nil { + l = m.ProviderConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Disabled != nil { + n += 2 + } + return n +} + +func (m 
*ExtensionResourceState) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + if m.Name != nil { + l = len(*m.Name) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Purpose != nil { + l = len(*m.Purpose) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.State != nil { + l = m.State.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Resources) > 0 { + for _, e := range m.Resources { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Gardener) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Version) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *GardenerResourceData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Data.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Hibernation) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Enabled != nil { + n += 2 + } + if len(m.Schedules) > 0 { + for _, e := range m.Schedules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *HibernationSchedule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Start != nil { + l = len(*m.Start) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.End != nil { + l = len(*m.End) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Location != nil { + l = len(*m.Location) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *HorizontalPodAutoscalerConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CPUInitializationPeriod != nil { + l = m.CPUInitializationPeriod.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.DownscaleDelay != nil { + l = m.DownscaleDelay.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.DownscaleStabilization != nil { + l = m.DownscaleStabilization.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.InitialReadinessDelay != nil { + l = m.InitialReadinessDelay.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.SyncPeriod != nil { + l = m.SyncPeriod.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Tolerance != nil { + n += 9 + } + if m.UpscaleDelay != nil { + l = m.UpscaleDelay.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *Ingress) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Domain) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Controller.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *IngressController) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + if m.ProviderConfig != nil { + l = m.ProviderConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *KubeAPIServerConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.KubernetesConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.AdmissionPlugins) > 0 { + for _, e := range m.AdmissionPlugins { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.APIAudiences) > 0 { + for _, s := range m.APIAudiences { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.AuditConfig != nil { + l = 
m.AuditConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.EnableBasicAuthentication != nil { + n += 2 + } + if m.OIDCConfig != nil { + l = m.OIDCConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.RuntimeConfig) > 0 { + for k, v := range m.RuntimeConfig { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + 1 + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.ServiceAccountConfig != nil { + l = m.ServiceAccountConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.WatchCacheSizes != nil { + l = m.WatchCacheSizes.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Requests != nil { + l = m.Requests.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *KubeAPIServerRequests) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MaxNonMutatingInflight != nil { + n += 1 + sovGenerated(uint64(*m.MaxNonMutatingInflight)) + } + if m.MaxMutatingInflight != nil { + n += 1 + sovGenerated(uint64(*m.MaxMutatingInflight)) + } + return n +} + +func (m *KubeControllerManagerConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.KubernetesConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.HorizontalPodAutoscalerConfig != nil { + l = m.HorizontalPodAutoscalerConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NodeCIDRMaskSize != nil { + n += 1 + sovGenerated(uint64(*m.NodeCIDRMaskSize)) + } + if m.PodEvictionTimeout != nil { + l = m.PodEvictionTimeout.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *KubeProxyConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.KubernetesConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Mode != nil { + l = len(*m.Mode) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *KubeSchedulerConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.KubernetesConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.KubeMaxPDVols != nil { + l = len(*m.KubeMaxPDVols) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *KubeletConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.KubernetesConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.CPUCFSQuota != nil { + n += 2 + } + if m.CPUManagerPolicy != nil { + l = len(*m.CPUManagerPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.EvictionHard != nil { + l = m.EvictionHard.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.EvictionMaxPodGracePeriod != nil { + n += 1 + sovGenerated(uint64(*m.EvictionMaxPodGracePeriod)) + } + if m.EvictionMinimumReclaim != nil { + l = m.EvictionMinimumReclaim.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.EvictionPressureTransitionPeriod != nil { + l = m.EvictionPressureTransitionPeriod.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.EvictionSoft != nil { + l = m.EvictionSoft.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.EvictionSoftGracePeriod != nil { + l = m.EvictionSoftGracePeriod.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.MaxPods != nil { + n += 1 + sovGenerated(uint64(*m.MaxPods)) + } + if m.PodPIDsLimit != nil { + n += 1 + sovGenerated(uint64(*m.PodPIDsLimit)) + } + if m.ImagePullProgressDeadline != nil { + l = m.ImagePullProgressDeadline.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.FailSwapOn != nil { + n += 2 + } + if m.KubeReserved != nil { + l = m.KubeReserved.Size() + n += 1 + 
l + sovGenerated(uint64(l)) + } + if m.SystemReserved != nil { + l = m.SystemReserved.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *KubeletConfigEviction) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MemoryAvailable != nil { + l = len(*m.MemoryAvailable) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ImageFSAvailable != nil { + l = len(*m.ImageFSAvailable) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ImageFSInodesFree != nil { + l = len(*m.ImageFSInodesFree) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NodeFSAvailable != nil { + l = len(*m.NodeFSAvailable) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NodeFSInodesFree != nil { + l = len(*m.NodeFSInodesFree) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *KubeletConfigEvictionMinimumReclaim) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MemoryAvailable != nil { + l = m.MemoryAvailable.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ImageFSAvailable != nil { + l = m.ImageFSAvailable.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ImageFSInodesFree != nil { + l = m.ImageFSInodesFree.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NodeFSAvailable != nil { + l = m.NodeFSAvailable.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NodeFSInodesFree != nil { + l = m.NodeFSInodesFree.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *KubeletConfigEvictionSoftGracePeriod) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MemoryAvailable != nil { + l = m.MemoryAvailable.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ImageFSAvailable != nil { + l = m.ImageFSAvailable.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ImageFSInodesFree != nil { + l = m.ImageFSInodesFree.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NodeFSAvailable != nil { + l = m.NodeFSAvailable.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NodeFSInodesFree != nil { + l = m.NodeFSInodesFree.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *KubeletConfigReserved) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CPU != nil { + l = m.CPU.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Memory != nil { + l = m.Memory.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.EphemeralStorage != nil { + l = m.EphemeralStorage.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.PID != nil { + l = m.PID.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *Kubernetes) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.AllowPrivilegedContainers != nil { + n += 2 + } + if m.ClusterAutoscaler != nil { + l = m.ClusterAutoscaler.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.KubeAPIServer != nil { + l = m.KubeAPIServer.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.KubeControllerManager != nil { + l = m.KubeControllerManager.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.KubeScheduler != nil { + l = m.KubeScheduler.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.KubeProxy != nil { + l = m.KubeProxy.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Kubelet != nil { + l = m.Kubelet.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Version) + n += 1 + l + sovGenerated(uint64(l)) + if m.VerticalPodAutoscaler != nil { + l = m.VerticalPodAutoscaler.Size() + n += 1 + l + sovGenerated(uint64(l)) + 
} + return n +} + +func (m *KubernetesConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.FeatureGates) > 0 { + for k, v := range m.FeatureGates { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + 1 + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *KubernetesDashboard) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.AuthenticationMode != nil { + l = len(*m.AuthenticationMode) + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Addon.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *KubernetesInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Version) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *KubernetesSettings) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Versions) > 0 { + for _, e := range m.Versions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *LastError) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Description) + n += 1 + l + sovGenerated(uint64(l)) + if m.TaskID != nil { + l = len(*m.TaskID) + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Codes) > 0 { + for _, s := range m.Codes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.LastUpdateTime != nil { + l = m.LastUpdateTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *LastOperation) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Description) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastUpdateTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Progress)) + l = len(m.State) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Machine) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.Image != nil { + l = m.Image.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *MachineControllerManagerSettings) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MachineDrainTimeout != nil { + l = m.MachineDrainTimeout.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.MachineHealthTimeout != nil { + l = m.MachineHealthTimeout.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.MachineCreationTimeout != nil { + l = m.MachineCreationTimeout.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.MaxEvictRetries != nil { + n += 1 + sovGenerated(uint64(*m.MaxEvictRetries)) + } + if len(m.NodeConditions) > 0 { + for _, s := range m.NodeConditions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *MachineImage) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Versions) > 0 { + for _, e := range m.Versions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *MachineImageVersion) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ExpirableVersion.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.CRI) > 0 { + for _, e := range m.CRI { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *MachineType) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = 
m.CPU.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.GPU.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Memory.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.Storage != nil { + l = m.Storage.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Usable != nil { + n += 2 + } + return n +} + +func (m *MachineTypeStorage) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Class) + n += 1 + l + sovGenerated(uint64(l)) + l = m.StorageSize.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Maintenance) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.AutoUpdate != nil { + l = m.AutoUpdate.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.TimeWindow != nil { + l = m.TimeWindow.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ConfineSpecUpdateRollout != nil { + n += 2 + } + return n +} + +func (m *MaintenanceAutoUpdate) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + n += 2 + return n +} + +func (m *MaintenanceTimeWindow) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Begin) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.End) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Monitoring) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Alerting != nil { + l = m.Alerting.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *NamedResourceReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.ResourceRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Networking) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.ProviderConfig != nil { + l = m.ProviderConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Pods != nil { + l = len(*m.Pods) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Nodes != nil { + l = len(*m.Nodes) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Services != nil { + l = len(*m.Services) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *NginxIngress) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.LoadBalancerSourceRanges) > 0 { + for _, s := range m.LoadBalancerSourceRanges { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Config) > 0 { + for k, v := range m.Config { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.ExternalTrafficPolicy != nil { + l = len(*m.ExternalTrafficPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Addon.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *OIDCConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CABundle != nil { + l = len(*m.CABundle) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ClientAuthentication != nil { + l = m.ClientAuthentication.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ClientID != nil { + l = len(*m.ClientID) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.GroupsClaim != nil { + l = len(*m.GroupsClaim) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.GroupsPrefix != nil { + l = 
len(*m.GroupsPrefix) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.IssuerURL != nil { + l = len(*m.IssuerURL) + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.RequiredClaims) > 0 { + for k, v := range m.RequiredClaims { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.SigningAlgs) > 0 { + for _, s := range m.SigningAlgs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.UsernameClaim != nil { + l = len(*m.UsernameClaim) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.UsernamePrefix != nil { + l = len(*m.UsernamePrefix) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *OpenIDConnectClientAuthentication) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ExtraConfig) > 0 { + for k, v := range m.ExtraConfig { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.Secret != nil { + l = len(*m.Secret) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *Plant) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PlantList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PlantSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Endpoints) > 0 { + for _, e := range m.Endpoints { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PlantStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.ObservedGeneration != nil { + n += 1 + sovGenerated(uint64(*m.ObservedGeneration)) + } + if m.ClusterInfo != nil { + l = m.ClusterInfo.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *Project) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ProjectList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ProjectMember) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Subject.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Role) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Roles) > 0 { + for _, s := range m.Roles { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ProjectSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CreatedBy != nil 
{ + l = m.CreatedBy.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Description != nil { + l = len(*m.Description) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Owner != nil { + l = m.Owner.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Purpose != nil { + l = len(*m.Purpose) + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Members) > 0 { + for _, e := range m.Members { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Namespace != nil { + l = len(*m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Tolerations != nil { + l = m.Tolerations.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ProjectStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + l = len(m.Phase) + n += 1 + l + sovGenerated(uint64(l)) + if m.StaleSinceTimestamp != nil { + l = m.StaleSinceTimestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.StaleAutoDeleteTimestamp != nil { + l = m.StaleAutoDeleteTimestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ProjectTolerations) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Defaults) > 0 { + for _, e := range m.Defaults { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Whitelist) > 0 { + for _, e := range m.Whitelist { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Provider) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.ControlPlaneConfig != nil { + l = m.ControlPlaneConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.InfrastructureConfig != nil { + l = m.InfrastructureConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Workers) > 0 { + for _, e := range m.Workers { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Quota) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *QuotaList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *QuotaSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ClusterLifetimeDays != nil { + n += 1 + sovGenerated(uint64(*m.ClusterLifetimeDays)) + } + if len(m.Metrics) > 0 { + for k, v := range m.Metrics { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + l = m.Scope.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Region) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Zones) > 0 { + for _, e := range m.Zones { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m 
*ResourceData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.CrossVersionObjectReference.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Data.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceWatchCacheSize) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.APIGroup != nil { + l = len(*m.APIGroup) + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Resource) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.CacheSize)) + return n +} + +func (m *SecretBinding) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Quotas) > 0 { + for _, e := range m.Quotas { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *SecretBindingList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Seed) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SeedBackup) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Provider) + n += 1 + l + sovGenerated(uint64(l)) + if m.ProviderConfig != nil { + l = m.ProviderConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Region != nil { + l = len(*m.Region) + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SeedDNS) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.IngressDomain != nil { + l = len(*m.IngressDomain) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Provider != nil { + l = m.Provider.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *SeedDNSProvider) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Domains != nil { + l = m.Domains.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Zones != nil { + l = m.Zones.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *SeedList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *SeedNetworks) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Nodes != nil { + l = len(*m.Nodes) + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Pods) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Services) + n += 1 + l + sovGenerated(uint64(l)) + if m.ShootDefaults != nil { + l = m.ShootDefaults.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *SeedProvider) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.ProviderConfig != nil { + l = m.ProviderConfig.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + } + l = len(m.Region) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SeedSelector) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LabelSelector != nil { + l = m.LabelSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.ProviderTypes) > 0 { + for _, s := range m.ProviderTypes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *SeedSettingExcessCapacityReservation) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + return n +} + +func (m *SeedSettingLoadBalancerServices) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Annotations) > 0 { + for k, v := range m.Annotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *SeedSettingScheduling) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + return n +} + +func (m *SeedSettingShootDNS) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + return n +} + +func (m *SeedSettingVerticalPodAutoscaler) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + return n +} + +func (m *SeedSettings) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ExcessCapacityReservation != nil { + l = m.ExcessCapacityReservation.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Scheduling != nil { + l = m.Scheduling.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ShootDNS != nil { + l = m.ShootDNS.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.LoadBalancerServices != nil { + l = m.LoadBalancerServices.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.VerticalPodAutoscaler != nil { + l = m.VerticalPodAutoscaler.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *SeedSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Backup != nil { + l = m.Backup.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.BlockCIDRs) > 0 { + for _, s := range m.BlockCIDRs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.DNS.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Networks.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Provider.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Taints) > 0 { + for _, e := range m.Taints { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Volume != nil { + l = m.Volume.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Settings != nil { + l = m.Settings.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Ingress != nil { + l = m.Ingress.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *SeedStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Gardener != nil { + l = m.Gardener.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.KubernetesVersion != nil { + l = len(*m.KubernetesVersion) + n += 1 + l + sovGenerated(uint64(l)) + } + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + if m.ClusterIdentity != nil { + l = len(*m.ClusterIdentity) + n += 1 + l + 
sovGenerated(uint64(l)) + } + return n +} + +func (m *SeedTaint) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + if m.Value != nil { + l = len(*m.Value) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *SeedVolume) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MinimumSize != nil { + l = m.MinimumSize.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Providers) > 0 { + for _, e := range m.Providers { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *SeedVolumeProvider) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Purpose) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ServiceAccountConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Issuer != nil { + l = len(*m.Issuer) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.SigningKeySecret != nil { + l = m.SigningKeySecret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *Shoot) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ShootList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ShootMachineImage) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.ProviderConfig != nil { + l = m.ProviderConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Version != nil { + l = len(*m.Version) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ShootNetworks) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Pods != nil { + l = len(*m.Pods) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Services != nil { + l = len(*m.Services) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ShootSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Addons != nil { + l = m.Addons.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.CloudProfileName) + n += 1 + l + sovGenerated(uint64(l)) + if m.DNS != nil { + l = m.DNS.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Extensions) > 0 { + for _, e := range m.Extensions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Hibernation != nil { + l = m.Hibernation.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Kubernetes.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Networking.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Maintenance != nil { + l = m.Maintenance.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Monitoring != nil { + l = m.Monitoring.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Provider.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Purpose != nil { + l = len(*m.Purpose) + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Region) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.SecretBindingName) + n += 1 + l + sovGenerated(uint64(l)) + if m.SeedName != 
nil { + l = len(*m.SeedName) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.SeedSelector != nil { + l = m.SeedSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Resources) > 0 { + for _, e := range m.Resources { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if len(m.Tolerations) > 0 { + for _, e := range m.Tolerations { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ShootState) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ShootStateList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ShootStateSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Gardener) > 0 { + for _, e := range m.Gardener { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Extensions) > 0 { + for _, e := range m.Extensions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Resources) > 0 { + for _, e := range m.Resources { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ShootStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Constraints) > 0 { + for _, e := range m.Constraints { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.Gardener.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if m.LastOperation != nil { + l = m.LastOperation.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.LastError != nil { + l = m.LastError.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.LastErrors) > 0 { + for _, e := range m.LastErrors { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + if m.RetryCycleStartTime != nil { + l = m.RetryCycleStartTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Seed != nil { + l = len(*m.Seed) + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.TechnicalID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + if m.ClusterIdentity != nil { + l = len(*m.ClusterIdentity) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *Toleration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + if m.Value != nil { + l = len(*m.Value) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *VerticalPodAutoscaler) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + if m.EvictAfterOOMThreshold != nil { + l = m.EvictAfterOOMThreshold.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.EvictionRateBurst != nil { + n += 1 + sovGenerated(uint64(*m.EvictionRateBurst)) + } + if m.EvictionRateLimit != nil { + n += 9 + } + if m.EvictionTolerance != nil { + n += 9 + } + if m.RecommendationMarginFraction != nil { + n += 9 + } + if m.UpdaterInterval != nil { + l = m.UpdaterInterval.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.RecommenderInterval != nil { + l = 
m.RecommenderInterval.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *Volume) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Name != nil { + l = len(*m.Name) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Type != nil { + l = len(*m.Type) + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.VolumeSize) + n += 1 + l + sovGenerated(uint64(l)) + if m.Encrypted != nil { + n += 2 + } + return n +} + +func (m *VolumeType) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Class) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.Usable != nil { + n += 2 + } + return n +} + +func (m *WatchCacheSizes) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Default != nil { + n += 1 + sovGenerated(uint64(*m.Default)) + } + if len(m.Resources) > 0 { + for _, e := range m.Resources { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Worker) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Annotations) > 0 { + for k, v := range m.Annotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.CABundle != nil { + l = len(*m.CABundle) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.CRI != nil { + l = m.CRI.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Kubernetes != nil { + l = m.Kubernetes.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Machine.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Maximum)) + n += 1 + sovGenerated(uint64(m.Minimum)) + if m.MaxSurge != nil { + l = m.MaxSurge.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.MaxUnavailable != nil { + l = m.MaxUnavailable.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ProviderConfig != nil { + l = m.ProviderConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Taints) > 0 { + for _, e := range m.Taints { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Volume != nil { + l = m.Volume.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.DataVolumes) > 0 { + for _, e := range m.DataVolumes { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.KubeletDataVolumeName != nil { + l = len(*m.KubeletDataVolumeName) + n += 2 + l + sovGenerated(uint64(l)) + } + if len(m.Zones) > 0 { + for _, s := range m.Zones { + l = len(s) + n += 2 + l + sovGenerated(uint64(l)) + } + } + if m.SystemComponents != nil { + l = m.SystemComponents.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.MachineControllerManagerSettings != nil { + l = m.MachineControllerManagerSettings.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *WorkerKubernetes) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Kubelet != nil { + l = m.Kubelet.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *WorkerSystemComponents) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + return n +} + +func 
sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Addon) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Addon{`, + `Enabled:` + fmt.Sprintf("%v", this.Enabled) + `,`, + `}`, + }, "") + return s +} +func (this *Addons) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Addons{`, + `KubernetesDashboard:` + strings.Replace(this.KubernetesDashboard.String(), "KubernetesDashboard", "KubernetesDashboard", 1) + `,`, + `NginxIngress:` + strings.Replace(this.NginxIngress.String(), "NginxIngress", "NginxIngress", 1) + `,`, + `}`, + }, "") + return s +} +func (this *AdmissionPlugin) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AdmissionPlugin{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Config:` + strings.Replace(fmt.Sprintf("%v", this.Config), "RawExtension", "runtime.RawExtension", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Alerting) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Alerting{`, + `EmailReceivers:` + fmt.Sprintf("%v", this.EmailReceivers) + `,`, + `}`, + }, "") + return s +} +func (this *AuditConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AuditConfig{`, + `AuditPolicy:` + strings.Replace(this.AuditPolicy.String(), "AuditPolicy", "AuditPolicy", 1) + `,`, + `}`, + }, "") + return s +} +func (this *AuditPolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AuditPolicy{`, + `ConfigMapRef:` + strings.Replace(fmt.Sprintf("%v", this.ConfigMapRef), "ObjectReference", "v1.ObjectReference", 1) + `,`, + `}`, + }, "") + return s +} +func (this *AvailabilityZone) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AvailabilityZone{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `UnavailableMachineTypes:` + fmt.Sprintf("%v", this.UnavailableMachineTypes) + `,`, + `UnavailableVolumeTypes:` + fmt.Sprintf("%v", this.UnavailableVolumeTypes) + `,`, + `}`, + }, "") + return s +} +func (this *BackupBucket) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BackupBucket{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "BackupBucketSpec", "BackupBucketSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "BackupBucketStatus", "BackupBucketStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *BackupBucketList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]BackupBucket{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "BackupBucket", "BackupBucket", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&BackupBucketList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *BackupBucketProvider) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BackupBucketProvider{`, + `Type:` + 
fmt.Sprintf("%v", this.Type) + `,`, + `Region:` + fmt.Sprintf("%v", this.Region) + `,`, + `}`, + }, "") + return s +} +func (this *BackupBucketSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BackupBucketSpec{`, + `Provider:` + strings.Replace(strings.Replace(this.Provider.String(), "BackupBucketProvider", "BackupBucketProvider", 1), `&`, ``, 1) + `,`, + `ProviderConfig:` + strings.Replace(fmt.Sprintf("%v", this.ProviderConfig), "RawExtension", "runtime.RawExtension", 1) + `,`, + `SecretRef:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretReference", "v1.SecretReference", 1), `&`, ``, 1) + `,`, + `Seed:` + valueToStringGenerated(this.Seed) + `,`, + `}`, + }, "") + return s +} +func (this *BackupBucketStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BackupBucketStatus{`, + `ProviderStatus:` + strings.Replace(fmt.Sprintf("%v", this.ProviderStatus), "RawExtension", "runtime.RawExtension", 1) + `,`, + `LastOperation:` + strings.Replace(this.LastOperation.String(), "LastOperation", "LastOperation", 1) + `,`, + `LastError:` + strings.Replace(this.LastError.String(), "LastError", "LastError", 1) + `,`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `GeneratedSecretRef:` + strings.Replace(fmt.Sprintf("%v", this.GeneratedSecretRef), "SecretReference", "v1.SecretReference", 1) + `,`, + `}`, + }, "") + return s +} +func (this *BackupEntry) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BackupEntry{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "BackupEntrySpec", "BackupEntrySpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "BackupEntryStatus", "BackupEntryStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *BackupEntryList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]BackupEntry{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "BackupEntry", "BackupEntry", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&BackupEntryList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *BackupEntrySpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BackupEntrySpec{`, + `BucketName:` + fmt.Sprintf("%v", this.BucketName) + `,`, + `Seed:` + valueToStringGenerated(this.Seed) + `,`, + `}`, + }, "") + return s +} +func (this *BackupEntryStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BackupEntryStatus{`, + `LastOperation:` + strings.Replace(this.LastOperation.String(), "LastOperation", "LastOperation", 1) + `,`, + `LastError:` + strings.Replace(this.LastError.String(), "LastError", "LastError", 1) + `,`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `}`, + }, "") + return s +} +func (this *CRI) String() string { + if this == nil { + return "nil" + } + repeatedStringForContainerRuntimes := "[]ContainerRuntime{" + for _, f := range this.ContainerRuntimes { + 
repeatedStringForContainerRuntimes += strings.Replace(strings.Replace(f.String(), "ContainerRuntime", "ContainerRuntime", 1), `&`, ``, 1) + "," + } + repeatedStringForContainerRuntimes += "}" + s := strings.Join([]string{`&CRI{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `ContainerRuntimes:` + repeatedStringForContainerRuntimes + `,`, + `}`, + }, "") + return s +} +func (this *CloudInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CloudInfo{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Region:` + fmt.Sprintf("%v", this.Region) + `,`, + `}`, + }, "") + return s +} +func (this *CloudProfile) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CloudProfile{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "CloudProfileSpec", "CloudProfileSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *CloudProfileList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]CloudProfile{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "CloudProfile", "CloudProfile", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&CloudProfileList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *CloudProfileSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForMachineImages := "[]MachineImage{" + for _, f := range this.MachineImages { + repeatedStringForMachineImages += strings.Replace(strings.Replace(f.String(), "MachineImage", "MachineImage", 1), `&`, ``, 1) + "," + } + repeatedStringForMachineImages += "}" + repeatedStringForMachineTypes := "[]MachineType{" + for _, f := range this.MachineTypes { + repeatedStringForMachineTypes += strings.Replace(strings.Replace(f.String(), "MachineType", "MachineType", 1), `&`, ``, 1) + "," + } + repeatedStringForMachineTypes += "}" + repeatedStringForRegions := "[]Region{" + for _, f := range this.Regions { + repeatedStringForRegions += strings.Replace(strings.Replace(f.String(), "Region", "Region", 1), `&`, ``, 1) + "," + } + repeatedStringForRegions += "}" + repeatedStringForVolumeTypes := "[]VolumeType{" + for _, f := range this.VolumeTypes { + repeatedStringForVolumeTypes += strings.Replace(strings.Replace(f.String(), "VolumeType", "VolumeType", 1), `&`, ``, 1) + "," + } + repeatedStringForVolumeTypes += "}" + s := strings.Join([]string{`&CloudProfileSpec{`, + `CABundle:` + valueToStringGenerated(this.CABundle) + `,`, + `Kubernetes:` + strings.Replace(strings.Replace(this.Kubernetes.String(), "KubernetesSettings", "KubernetesSettings", 1), `&`, ``, 1) + `,`, + `MachineImages:` + repeatedStringForMachineImages + `,`, + `MachineTypes:` + repeatedStringForMachineTypes + `,`, + `ProviderConfig:` + strings.Replace(fmt.Sprintf("%v", this.ProviderConfig), "RawExtension", "runtime.RawExtension", 1) + `,`, + `Regions:` + repeatedStringForRegions + `,`, + `SeedSelector:` + strings.Replace(this.SeedSelector.String(), "SeedSelector", "SeedSelector", 1) + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `VolumeTypes:` + repeatedStringForVolumeTypes + `,`, + `}`, + }, "") + return s +} +func (this 
*ClusterAutoscaler) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterAutoscaler{`, + `ScaleDownDelayAfterAdd:` + strings.Replace(fmt.Sprintf("%v", this.ScaleDownDelayAfterAdd), "Duration", "v11.Duration", 1) + `,`, + `ScaleDownDelayAfterDelete:` + strings.Replace(fmt.Sprintf("%v", this.ScaleDownDelayAfterDelete), "Duration", "v11.Duration", 1) + `,`, + `ScaleDownDelayAfterFailure:` + strings.Replace(fmt.Sprintf("%v", this.ScaleDownDelayAfterFailure), "Duration", "v11.Duration", 1) + `,`, + `ScaleDownUnneededTime:` + strings.Replace(fmt.Sprintf("%v", this.ScaleDownUnneededTime), "Duration", "v11.Duration", 1) + `,`, + `ScaleDownUtilizationThreshold:` + valueToStringGenerated(this.ScaleDownUtilizationThreshold) + `,`, + `ScanInterval:` + strings.Replace(fmt.Sprintf("%v", this.ScanInterval), "Duration", "v11.Duration", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ClusterInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterInfo{`, + `Cloud:` + strings.Replace(strings.Replace(this.Cloud.String(), "CloudInfo", "CloudInfo", 1), `&`, ``, 1) + `,`, + `Kubernetes:` + strings.Replace(strings.Replace(this.Kubernetes.String(), "KubernetesInfo", "KubernetesInfo", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Condition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Condition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v11.Time", 1), `&`, ``, 1) + `,`, + `LastUpdateTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastUpdateTime), "Time", "v11.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Codes:` + fmt.Sprintf("%v", this.Codes) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerRuntime) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerRuntime{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `ProviderConfig:` + strings.Replace(fmt.Sprintf("%v", this.ProviderConfig), "RawExtension", "runtime.RawExtension", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ControllerDeployment) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ControllerDeployment{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `ProviderConfig:` + strings.Replace(fmt.Sprintf("%v", this.ProviderConfig), "RawExtension", "runtime.RawExtension", 1) + `,`, + `Policy:` + valueToStringGenerated(this.Policy) + `,`, + `SeedSelector:` + strings.Replace(fmt.Sprintf("%v", this.SeedSelector), "LabelSelector", "v11.LabelSelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ControllerInstallation) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ControllerInstallation{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ControllerInstallationSpec", "ControllerInstallationSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ControllerInstallationStatus", "ControllerInstallationStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ControllerInstallationList) 
String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ControllerInstallation{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ControllerInstallation", "ControllerInstallation", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ControllerInstallationList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ControllerInstallationSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ControllerInstallationSpec{`, + `RegistrationRef:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.RegistrationRef), "ObjectReference", "v1.ObjectReference", 1), `&`, ``, 1) + `,`, + `SeedRef:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.SeedRef), "ObjectReference", "v1.ObjectReference", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ControllerInstallationStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]Condition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "Condition", "Condition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&ControllerInstallationStatus{`, + `Conditions:` + repeatedStringForConditions + `,`, + `ProviderStatus:` + strings.Replace(fmt.Sprintf("%v", this.ProviderStatus), "RawExtension", "runtime.RawExtension", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ControllerRegistration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ControllerRegistration{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ControllerRegistrationSpec", "ControllerRegistrationSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ControllerRegistrationList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ControllerRegistration{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ControllerRegistration", "ControllerRegistration", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ControllerRegistrationList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ControllerRegistrationSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForResources := "[]ControllerResource{" + for _, f := range this.Resources { + repeatedStringForResources += strings.Replace(strings.Replace(f.String(), "ControllerResource", "ControllerResource", 1), `&`, ``, 1) + "," + } + repeatedStringForResources += "}" + s := strings.Join([]string{`&ControllerRegistrationSpec{`, + `Resources:` + repeatedStringForResources + `,`, + `Deployment:` + strings.Replace(this.Deployment.String(), "ControllerDeployment", "ControllerDeployment", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ControllerResource) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&ControllerResource{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `GloballyEnabled:` + valueToStringGenerated(this.GloballyEnabled) + `,`, + `ReconcileTimeout:` + strings.Replace(fmt.Sprintf("%v", this.ReconcileTimeout), "Duration", "v11.Duration", 1) + `,`, + `Primary:` + valueToStringGenerated(this.Primary) + `,`, + `}`, + }, "") + return s +} +func (this *DNS) String() string { + if this == nil { + return "nil" + } + repeatedStringForProviders := "[]DNSProvider{" + for _, f := range this.Providers { + repeatedStringForProviders += strings.Replace(strings.Replace(f.String(), "DNSProvider", "DNSProvider", 1), `&`, ``, 1) + "," + } + repeatedStringForProviders += "}" + s := strings.Join([]string{`&DNS{`, + `Domain:` + valueToStringGenerated(this.Domain) + `,`, + `Providers:` + repeatedStringForProviders + `,`, + `}`, + }, "") + return s +} +func (this *DNSIncludeExclude) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DNSIncludeExclude{`, + `Include:` + fmt.Sprintf("%v", this.Include) + `,`, + `Exclude:` + fmt.Sprintf("%v", this.Exclude) + `,`, + `}`, + }, "") + return s +} +func (this *DNSProvider) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DNSProvider{`, + `Domains:` + strings.Replace(this.Domains.String(), "DNSIncludeExclude", "DNSIncludeExclude", 1) + `,`, + `Primary:` + valueToStringGenerated(this.Primary) + `,`, + `SecretName:` + valueToStringGenerated(this.SecretName) + `,`, + `Type:` + valueToStringGenerated(this.Type) + `,`, + `Zones:` + strings.Replace(this.Zones.String(), "DNSIncludeExclude", "DNSIncludeExclude", 1) + `,`, + `}`, + }, "") + return s +} +func (this *DataVolume) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DataVolume{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Type:` + valueToStringGenerated(this.Type) + `,`, + `VolumeSize:` + fmt.Sprintf("%v", this.VolumeSize) + `,`, + `Encrypted:` + valueToStringGenerated(this.Encrypted) + `,`, + `}`, + }, "") + return s +} +func (this *Endpoint) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Endpoint{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `URL:` + fmt.Sprintf("%v", this.URL) + `,`, + `Purpose:` + fmt.Sprintf("%v", this.Purpose) + `,`, + `}`, + }, "") + return s +} +func (this *ExpirableVersion) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ExpirableVersion{`, + `Version:` + fmt.Sprintf("%v", this.Version) + `,`, + `ExpirationDate:` + strings.Replace(fmt.Sprintf("%v", this.ExpirationDate), "Time", "v11.Time", 1) + `,`, + `Classification:` + valueToStringGenerated(this.Classification) + `,`, + `}`, + }, "") + return s +} +func (this *Extension) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Extension{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `ProviderConfig:` + strings.Replace(fmt.Sprintf("%v", this.ProviderConfig), "RawExtension", "runtime.RawExtension", 1) + `,`, + `Disabled:` + valueToStringGenerated(this.Disabled) + `,`, + `}`, + }, "") + return s +} +func (this *ExtensionResourceState) String() string { + if this == nil { + return "nil" + } + repeatedStringForResources := "[]NamedResourceReference{" + for _, f := range this.Resources { + repeatedStringForResources += fmt.Sprintf("%v", f) + "," + } + repeatedStringForResources += "}" + s := 
strings.Join([]string{`&ExtensionResourceState{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Name:` + valueToStringGenerated(this.Name) + `,`, + `Purpose:` + valueToStringGenerated(this.Purpose) + `,`, + `State:` + strings.Replace(fmt.Sprintf("%v", this.State), "RawExtension", "runtime.RawExtension", 1) + `,`, + `Resources:` + repeatedStringForResources + `,`, + `}`, + }, "") + return s +} +func (this *Gardener) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Gardener{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Version:` + fmt.Sprintf("%v", this.Version) + `,`, + `}`, + }, "") + return s +} +func (this *GardenerResourceData) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GardenerResourceData{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Data:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Data), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Hibernation) String() string { + if this == nil { + return "nil" + } + repeatedStringForSchedules := "[]HibernationSchedule{" + for _, f := range this.Schedules { + repeatedStringForSchedules += strings.Replace(strings.Replace(f.String(), "HibernationSchedule", "HibernationSchedule", 1), `&`, ``, 1) + "," + } + repeatedStringForSchedules += "}" + s := strings.Join([]string{`&Hibernation{`, + `Enabled:` + valueToStringGenerated(this.Enabled) + `,`, + `Schedules:` + repeatedStringForSchedules + `,`, + `}`, + }, "") + return s +} +func (this *HibernationSchedule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HibernationSchedule{`, + `Start:` + valueToStringGenerated(this.Start) + `,`, + `End:` + valueToStringGenerated(this.End) + `,`, + `Location:` + valueToStringGenerated(this.Location) + `,`, + `}`, + }, "") + return s +} +func (this *HorizontalPodAutoscalerConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HorizontalPodAutoscalerConfig{`, + `CPUInitializationPeriod:` + strings.Replace(fmt.Sprintf("%v", this.CPUInitializationPeriod), "Duration", "v11.Duration", 1) + `,`, + `DownscaleDelay:` + strings.Replace(fmt.Sprintf("%v", this.DownscaleDelay), "Duration", "v11.Duration", 1) + `,`, + `DownscaleStabilization:` + strings.Replace(fmt.Sprintf("%v", this.DownscaleStabilization), "Duration", "v11.Duration", 1) + `,`, + `InitialReadinessDelay:` + strings.Replace(fmt.Sprintf("%v", this.InitialReadinessDelay), "Duration", "v11.Duration", 1) + `,`, + `SyncPeriod:` + strings.Replace(fmt.Sprintf("%v", this.SyncPeriod), "Duration", "v11.Duration", 1) + `,`, + `Tolerance:` + valueToStringGenerated(this.Tolerance) + `,`, + `UpscaleDelay:` + strings.Replace(fmt.Sprintf("%v", this.UpscaleDelay), "Duration", "v11.Duration", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Ingress) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Ingress{`, + `Domain:` + fmt.Sprintf("%v", this.Domain) + `,`, + `Controller:` + strings.Replace(strings.Replace(this.Controller.String(), "IngressController", "IngressController", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *IngressController) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IngressController{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `ProviderConfig:` + 
strings.Replace(fmt.Sprintf("%v", this.ProviderConfig), "RawExtension", "runtime.RawExtension", 1) + `,`, + `}`, + }, "") + return s +} +func (this *KubeAPIServerConfig) String() string { + if this == nil { + return "nil" + } + repeatedStringForAdmissionPlugins := "[]AdmissionPlugin{" + for _, f := range this.AdmissionPlugins { + repeatedStringForAdmissionPlugins += strings.Replace(strings.Replace(f.String(), "AdmissionPlugin", "AdmissionPlugin", 1), `&`, ``, 1) + "," + } + repeatedStringForAdmissionPlugins += "}" + keysForRuntimeConfig := make([]string, 0, len(this.RuntimeConfig)) + for k := range this.RuntimeConfig { + keysForRuntimeConfig = append(keysForRuntimeConfig, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForRuntimeConfig) + mapStringForRuntimeConfig := "map[string]bool{" + for _, k := range keysForRuntimeConfig { + mapStringForRuntimeConfig += fmt.Sprintf("%v: %v,", k, this.RuntimeConfig[k]) + } + mapStringForRuntimeConfig += "}" + s := strings.Join([]string{`&KubeAPIServerConfig{`, + `KubernetesConfig:` + strings.Replace(strings.Replace(this.KubernetesConfig.String(), "KubernetesConfig", "KubernetesConfig", 1), `&`, ``, 1) + `,`, + `AdmissionPlugins:` + repeatedStringForAdmissionPlugins + `,`, + `APIAudiences:` + fmt.Sprintf("%v", this.APIAudiences) + `,`, + `AuditConfig:` + strings.Replace(this.AuditConfig.String(), "AuditConfig", "AuditConfig", 1) + `,`, + `EnableBasicAuthentication:` + valueToStringGenerated(this.EnableBasicAuthentication) + `,`, + `OIDCConfig:` + strings.Replace(this.OIDCConfig.String(), "OIDCConfig", "OIDCConfig", 1) + `,`, + `RuntimeConfig:` + mapStringForRuntimeConfig + `,`, + `ServiceAccountConfig:` + strings.Replace(this.ServiceAccountConfig.String(), "ServiceAccountConfig", "ServiceAccountConfig", 1) + `,`, + `WatchCacheSizes:` + strings.Replace(this.WatchCacheSizes.String(), "WatchCacheSizes", "WatchCacheSizes", 1) + `,`, + `Requests:` + strings.Replace(this.Requests.String(), "KubeAPIServerRequests", "KubeAPIServerRequests", 1) + `,`, + `}`, + }, "") + return s +} +func (this *KubeAPIServerRequests) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&KubeAPIServerRequests{`, + `MaxNonMutatingInflight:` + valueToStringGenerated(this.MaxNonMutatingInflight) + `,`, + `MaxMutatingInflight:` + valueToStringGenerated(this.MaxMutatingInflight) + `,`, + `}`, + }, "") + return s +} +func (this *KubeControllerManagerConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&KubeControllerManagerConfig{`, + `KubernetesConfig:` + strings.Replace(strings.Replace(this.KubernetesConfig.String(), "KubernetesConfig", "KubernetesConfig", 1), `&`, ``, 1) + `,`, + `HorizontalPodAutoscalerConfig:` + strings.Replace(this.HorizontalPodAutoscalerConfig.String(), "HorizontalPodAutoscalerConfig", "HorizontalPodAutoscalerConfig", 1) + `,`, + `NodeCIDRMaskSize:` + valueToStringGenerated(this.NodeCIDRMaskSize) + `,`, + `PodEvictionTimeout:` + strings.Replace(fmt.Sprintf("%v", this.PodEvictionTimeout), "Duration", "v11.Duration", 1) + `,`, + `}`, + }, "") + return s +} +func (this *KubeProxyConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&KubeProxyConfig{`, + `KubernetesConfig:` + strings.Replace(strings.Replace(this.KubernetesConfig.String(), "KubernetesConfig", "KubernetesConfig", 1), `&`, ``, 1) + `,`, + `Mode:` + valueToStringGenerated(this.Mode) + `,`, + `}`, + }, "") + return s +} +func (this *KubeSchedulerConfig) String() string { + if this == 
nil { + return "nil" + } + s := strings.Join([]string{`&KubeSchedulerConfig{`, + `KubernetesConfig:` + strings.Replace(strings.Replace(this.KubernetesConfig.String(), "KubernetesConfig", "KubernetesConfig", 1), `&`, ``, 1) + `,`, + `KubeMaxPDVols:` + valueToStringGenerated(this.KubeMaxPDVols) + `,`, + `}`, + }, "") + return s +} +func (this *KubeletConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&KubeletConfig{`, + `KubernetesConfig:` + strings.Replace(strings.Replace(this.KubernetesConfig.String(), "KubernetesConfig", "KubernetesConfig", 1), `&`, ``, 1) + `,`, + `CPUCFSQuota:` + valueToStringGenerated(this.CPUCFSQuota) + `,`, + `CPUManagerPolicy:` + valueToStringGenerated(this.CPUManagerPolicy) + `,`, + `EvictionHard:` + strings.Replace(this.EvictionHard.String(), "KubeletConfigEviction", "KubeletConfigEviction", 1) + `,`, + `EvictionMaxPodGracePeriod:` + valueToStringGenerated(this.EvictionMaxPodGracePeriod) + `,`, + `EvictionMinimumReclaim:` + strings.Replace(this.EvictionMinimumReclaim.String(), "KubeletConfigEvictionMinimumReclaim", "KubeletConfigEvictionMinimumReclaim", 1) + `,`, + `EvictionPressureTransitionPeriod:` + strings.Replace(fmt.Sprintf("%v", this.EvictionPressureTransitionPeriod), "Duration", "v11.Duration", 1) + `,`, + `EvictionSoft:` + strings.Replace(this.EvictionSoft.String(), "KubeletConfigEviction", "KubeletConfigEviction", 1) + `,`, + `EvictionSoftGracePeriod:` + strings.Replace(this.EvictionSoftGracePeriod.String(), "KubeletConfigEvictionSoftGracePeriod", "KubeletConfigEvictionSoftGracePeriod", 1) + `,`, + `MaxPods:` + valueToStringGenerated(this.MaxPods) + `,`, + `PodPIDsLimit:` + valueToStringGenerated(this.PodPIDsLimit) + `,`, + `ImagePullProgressDeadline:` + strings.Replace(fmt.Sprintf("%v", this.ImagePullProgressDeadline), "Duration", "v11.Duration", 1) + `,`, + `FailSwapOn:` + valueToStringGenerated(this.FailSwapOn) + `,`, + `KubeReserved:` + strings.Replace(this.KubeReserved.String(), "KubeletConfigReserved", "KubeletConfigReserved", 1) + `,`, + `SystemReserved:` + strings.Replace(this.SystemReserved.String(), "KubeletConfigReserved", "KubeletConfigReserved", 1) + `,`, + `}`, + }, "") + return s +} +func (this *KubeletConfigEviction) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&KubeletConfigEviction{`, + `MemoryAvailable:` + valueToStringGenerated(this.MemoryAvailable) + `,`, + `ImageFSAvailable:` + valueToStringGenerated(this.ImageFSAvailable) + `,`, + `ImageFSInodesFree:` + valueToStringGenerated(this.ImageFSInodesFree) + `,`, + `NodeFSAvailable:` + valueToStringGenerated(this.NodeFSAvailable) + `,`, + `NodeFSInodesFree:` + valueToStringGenerated(this.NodeFSInodesFree) + `,`, + `}`, + }, "") + return s +} +func (this *KubeletConfigEvictionMinimumReclaim) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&KubeletConfigEvictionMinimumReclaim{`, + `MemoryAvailable:` + strings.Replace(fmt.Sprintf("%v", this.MemoryAvailable), "Quantity", "resource.Quantity", 1) + `,`, + `ImageFSAvailable:` + strings.Replace(fmt.Sprintf("%v", this.ImageFSAvailable), "Quantity", "resource.Quantity", 1) + `,`, + `ImageFSInodesFree:` + strings.Replace(fmt.Sprintf("%v", this.ImageFSInodesFree), "Quantity", "resource.Quantity", 1) + `,`, + `NodeFSAvailable:` + strings.Replace(fmt.Sprintf("%v", this.NodeFSAvailable), "Quantity", "resource.Quantity", 1) + `,`, + `NodeFSInodesFree:` + strings.Replace(fmt.Sprintf("%v", this.NodeFSInodesFree), "Quantity", 
"resource.Quantity", 1) + `,`, + `}`, + }, "") + return s +} +func (this *KubeletConfigEvictionSoftGracePeriod) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&KubeletConfigEvictionSoftGracePeriod{`, + `MemoryAvailable:` + strings.Replace(fmt.Sprintf("%v", this.MemoryAvailable), "Duration", "v11.Duration", 1) + `,`, + `ImageFSAvailable:` + strings.Replace(fmt.Sprintf("%v", this.ImageFSAvailable), "Duration", "v11.Duration", 1) + `,`, + `ImageFSInodesFree:` + strings.Replace(fmt.Sprintf("%v", this.ImageFSInodesFree), "Duration", "v11.Duration", 1) + `,`, + `NodeFSAvailable:` + strings.Replace(fmt.Sprintf("%v", this.NodeFSAvailable), "Duration", "v11.Duration", 1) + `,`, + `NodeFSInodesFree:` + strings.Replace(fmt.Sprintf("%v", this.NodeFSInodesFree), "Duration", "v11.Duration", 1) + `,`, + `}`, + }, "") + return s +} +func (this *KubeletConfigReserved) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&KubeletConfigReserved{`, + `CPU:` + strings.Replace(fmt.Sprintf("%v", this.CPU), "Quantity", "resource.Quantity", 1) + `,`, + `Memory:` + strings.Replace(fmt.Sprintf("%v", this.Memory), "Quantity", "resource.Quantity", 1) + `,`, + `EphemeralStorage:` + strings.Replace(fmt.Sprintf("%v", this.EphemeralStorage), "Quantity", "resource.Quantity", 1) + `,`, + `PID:` + strings.Replace(fmt.Sprintf("%v", this.PID), "Quantity", "resource.Quantity", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Kubernetes) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Kubernetes{`, + `AllowPrivilegedContainers:` + valueToStringGenerated(this.AllowPrivilegedContainers) + `,`, + `ClusterAutoscaler:` + strings.Replace(this.ClusterAutoscaler.String(), "ClusterAutoscaler", "ClusterAutoscaler", 1) + `,`, + `KubeAPIServer:` + strings.Replace(this.KubeAPIServer.String(), "KubeAPIServerConfig", "KubeAPIServerConfig", 1) + `,`, + `KubeControllerManager:` + strings.Replace(this.KubeControllerManager.String(), "KubeControllerManagerConfig", "KubeControllerManagerConfig", 1) + `,`, + `KubeScheduler:` + strings.Replace(this.KubeScheduler.String(), "KubeSchedulerConfig", "KubeSchedulerConfig", 1) + `,`, + `KubeProxy:` + strings.Replace(this.KubeProxy.String(), "KubeProxyConfig", "KubeProxyConfig", 1) + `,`, + `Kubelet:` + strings.Replace(this.Kubelet.String(), "KubeletConfig", "KubeletConfig", 1) + `,`, + `Version:` + fmt.Sprintf("%v", this.Version) + `,`, + `VerticalPodAutoscaler:` + strings.Replace(this.VerticalPodAutoscaler.String(), "VerticalPodAutoscaler", "VerticalPodAutoscaler", 1) + `,`, + `}`, + }, "") + return s +} +func (this *KubernetesConfig) String() string { + if this == nil { + return "nil" + } + keysForFeatureGates := make([]string, 0, len(this.FeatureGates)) + for k := range this.FeatureGates { + keysForFeatureGates = append(keysForFeatureGates, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForFeatureGates) + mapStringForFeatureGates := "map[string]bool{" + for _, k := range keysForFeatureGates { + mapStringForFeatureGates += fmt.Sprintf("%v: %v,", k, this.FeatureGates[k]) + } + mapStringForFeatureGates += "}" + s := strings.Join([]string{`&KubernetesConfig{`, + `FeatureGates:` + mapStringForFeatureGates + `,`, + `}`, + }, "") + return s +} +func (this *KubernetesDashboard) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&KubernetesDashboard{`, + `AuthenticationMode:` + valueToStringGenerated(this.AuthenticationMode) + `,`, + `Addon:` + 
strings.Replace(strings.Replace(this.Addon.String(), "Addon", "Addon", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *KubernetesInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&KubernetesInfo{`, + `Version:` + fmt.Sprintf("%v", this.Version) + `,`, + `}`, + }, "") + return s +} +func (this *KubernetesSettings) String() string { + if this == nil { + return "nil" + } + repeatedStringForVersions := "[]ExpirableVersion{" + for _, f := range this.Versions { + repeatedStringForVersions += strings.Replace(strings.Replace(f.String(), "ExpirableVersion", "ExpirableVersion", 1), `&`, ``, 1) + "," + } + repeatedStringForVersions += "}" + s := strings.Join([]string{`&KubernetesSettings{`, + `Versions:` + repeatedStringForVersions + `,`, + `}`, + }, "") + return s +} +func (this *LastError) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LastError{`, + `Description:` + fmt.Sprintf("%v", this.Description) + `,`, + `TaskID:` + valueToStringGenerated(this.TaskID) + `,`, + `Codes:` + fmt.Sprintf("%v", this.Codes) + `,`, + `LastUpdateTime:` + strings.Replace(fmt.Sprintf("%v", this.LastUpdateTime), "Time", "v11.Time", 1) + `,`, + `}`, + }, "") + return s +} +func (this *LastOperation) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LastOperation{`, + `Description:` + fmt.Sprintf("%v", this.Description) + `,`, + `LastUpdateTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastUpdateTime), "Time", "v11.Time", 1), `&`, ``, 1) + `,`, + `Progress:` + fmt.Sprintf("%v", this.Progress) + `,`, + `State:` + fmt.Sprintf("%v", this.State) + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `}`, + }, "") + return s +} +func (this *Machine) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Machine{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Image:` + strings.Replace(this.Image.String(), "ShootMachineImage", "ShootMachineImage", 1) + `,`, + `}`, + }, "") + return s +} +func (this *MachineControllerManagerSettings) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MachineControllerManagerSettings{`, + `MachineDrainTimeout:` + strings.Replace(fmt.Sprintf("%v", this.MachineDrainTimeout), "Duration", "v11.Duration", 1) + `,`, + `MachineHealthTimeout:` + strings.Replace(fmt.Sprintf("%v", this.MachineHealthTimeout), "Duration", "v11.Duration", 1) + `,`, + `MachineCreationTimeout:` + strings.Replace(fmt.Sprintf("%v", this.MachineCreationTimeout), "Duration", "v11.Duration", 1) + `,`, + `MaxEvictRetries:` + valueToStringGenerated(this.MaxEvictRetries) + `,`, + `NodeConditions:` + fmt.Sprintf("%v", this.NodeConditions) + `,`, + `}`, + }, "") + return s +} +func (this *MachineImage) String() string { + if this == nil { + return "nil" + } + repeatedStringForVersions := "[]MachineImageVersion{" + for _, f := range this.Versions { + repeatedStringForVersions += strings.Replace(strings.Replace(f.String(), "MachineImageVersion", "MachineImageVersion", 1), `&`, ``, 1) + "," + } + repeatedStringForVersions += "}" + s := strings.Join([]string{`&MachineImage{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Versions:` + repeatedStringForVersions + `,`, + `}`, + }, "") + return s +} +func (this *MachineImageVersion) String() string { + if this == nil { + return "nil" + } + repeatedStringForCRI := "[]CRI{" + for _, f := range this.CRI { + repeatedStringForCRI += 
strings.Replace(strings.Replace(f.String(), "CRI", "CRI", 1), `&`, ``, 1) + "," + } + repeatedStringForCRI += "}" + s := strings.Join([]string{`&MachineImageVersion{`, + `ExpirableVersion:` + strings.Replace(strings.Replace(this.ExpirableVersion.String(), "ExpirableVersion", "ExpirableVersion", 1), `&`, ``, 1) + `,`, + `CRI:` + repeatedStringForCRI + `,`, + `}`, + }, "") + return s +} +func (this *MachineType) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MachineType{`, + `CPU:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CPU), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `GPU:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.GPU), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `Memory:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Memory), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Storage:` + strings.Replace(this.Storage.String(), "MachineTypeStorage", "MachineTypeStorage", 1) + `,`, + `Usable:` + valueToStringGenerated(this.Usable) + `,`, + `}`, + }, "") + return s +} +func (this *MachineTypeStorage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MachineTypeStorage{`, + `Class:` + fmt.Sprintf("%v", this.Class) + `,`, + `StorageSize:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.StorageSize), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `}`, + }, "") + return s +} +func (this *Maintenance) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Maintenance{`, + `AutoUpdate:` + strings.Replace(this.AutoUpdate.String(), "MaintenanceAutoUpdate", "MaintenanceAutoUpdate", 1) + `,`, + `TimeWindow:` + strings.Replace(this.TimeWindow.String(), "MaintenanceTimeWindow", "MaintenanceTimeWindow", 1) + `,`, + `ConfineSpecUpdateRollout:` + valueToStringGenerated(this.ConfineSpecUpdateRollout) + `,`, + `}`, + }, "") + return s +} +func (this *MaintenanceAutoUpdate) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MaintenanceAutoUpdate{`, + `KubernetesVersion:` + fmt.Sprintf("%v", this.KubernetesVersion) + `,`, + `MachineImageVersion:` + fmt.Sprintf("%v", this.MachineImageVersion) + `,`, + `}`, + }, "") + return s +} +func (this *MaintenanceTimeWindow) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MaintenanceTimeWindow{`, + `Begin:` + fmt.Sprintf("%v", this.Begin) + `,`, + `End:` + fmt.Sprintf("%v", this.End) + `,`, + `}`, + }, "") + return s +} +func (this *Monitoring) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Monitoring{`, + `Alerting:` + strings.Replace(this.Alerting.String(), "Alerting", "Alerting", 1) + `,`, + `}`, + }, "") + return s +} +func (this *NamedResourceReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NamedResourceReference{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `ResourceRef:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ResourceRef), "CrossVersionObjectReference", "v12.CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Networking) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Networking{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `ProviderConfig:` + strings.Replace(fmt.Sprintf("%v", 
this.ProviderConfig), "RawExtension", "runtime.RawExtension", 1) + `,`, + `Pods:` + valueToStringGenerated(this.Pods) + `,`, + `Nodes:` + valueToStringGenerated(this.Nodes) + `,`, + `Services:` + valueToStringGenerated(this.Services) + `,`, + `}`, + }, "") + return s +} +func (this *NginxIngress) String() string { + if this == nil { + return "nil" + } + keysForConfig := make([]string, 0, len(this.Config)) + for k := range this.Config { + keysForConfig = append(keysForConfig, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForConfig) + mapStringForConfig := "map[string]string{" + for _, k := range keysForConfig { + mapStringForConfig += fmt.Sprintf("%v: %v,", k, this.Config[k]) + } + mapStringForConfig += "}" + s := strings.Join([]string{`&NginxIngress{`, + `LoadBalancerSourceRanges:` + fmt.Sprintf("%v", this.LoadBalancerSourceRanges) + `,`, + `Config:` + mapStringForConfig + `,`, + `ExternalTrafficPolicy:` + valueToStringGenerated(this.ExternalTrafficPolicy) + `,`, + `Addon:` + strings.Replace(strings.Replace(this.Addon.String(), "Addon", "Addon", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *OIDCConfig) String() string { + if this == nil { + return "nil" + } + keysForRequiredClaims := make([]string, 0, len(this.RequiredClaims)) + for k := range this.RequiredClaims { + keysForRequiredClaims = append(keysForRequiredClaims, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForRequiredClaims) + mapStringForRequiredClaims := "map[string]string{" + for _, k := range keysForRequiredClaims { + mapStringForRequiredClaims += fmt.Sprintf("%v: %v,", k, this.RequiredClaims[k]) + } + mapStringForRequiredClaims += "}" + s := strings.Join([]string{`&OIDCConfig{`, + `CABundle:` + valueToStringGenerated(this.CABundle) + `,`, + `ClientAuthentication:` + strings.Replace(this.ClientAuthentication.String(), "OpenIDConnectClientAuthentication", "OpenIDConnectClientAuthentication", 1) + `,`, + `ClientID:` + valueToStringGenerated(this.ClientID) + `,`, + `GroupsClaim:` + valueToStringGenerated(this.GroupsClaim) + `,`, + `GroupsPrefix:` + valueToStringGenerated(this.GroupsPrefix) + `,`, + `IssuerURL:` + valueToStringGenerated(this.IssuerURL) + `,`, + `RequiredClaims:` + mapStringForRequiredClaims + `,`, + `SigningAlgs:` + fmt.Sprintf("%v", this.SigningAlgs) + `,`, + `UsernameClaim:` + valueToStringGenerated(this.UsernameClaim) + `,`, + `UsernamePrefix:` + valueToStringGenerated(this.UsernamePrefix) + `,`, + `}`, + }, "") + return s +} +func (this *OpenIDConnectClientAuthentication) String() string { + if this == nil { + return "nil" + } + keysForExtraConfig := make([]string, 0, len(this.ExtraConfig)) + for k := range this.ExtraConfig { + keysForExtraConfig = append(keysForExtraConfig, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForExtraConfig) + mapStringForExtraConfig := "map[string]string{" + for _, k := range keysForExtraConfig { + mapStringForExtraConfig += fmt.Sprintf("%v: %v,", k, this.ExtraConfig[k]) + } + mapStringForExtraConfig += "}" + s := strings.Join([]string{`&OpenIDConnectClientAuthentication{`, + `ExtraConfig:` + mapStringForExtraConfig + `,`, + `Secret:` + valueToStringGenerated(this.Secret) + `,`, + `}`, + }, "") + return s +} +func (this *Plant) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Plant{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PlantSpec", 
"PlantSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PlantStatus", "PlantStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PlantList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Plant{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Plant", "Plant", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&PlantList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *PlantSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForEndpoints := "[]Endpoint{" + for _, f := range this.Endpoints { + repeatedStringForEndpoints += strings.Replace(strings.Replace(f.String(), "Endpoint", "Endpoint", 1), `&`, ``, 1) + "," + } + repeatedStringForEndpoints += "}" + s := strings.Join([]string{`&PlantSpec{`, + `SecretRef:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.SecretRef), "LocalObjectReference", "v1.LocalObjectReference", 1), `&`, ``, 1) + `,`, + `Endpoints:` + repeatedStringForEndpoints + `,`, + `}`, + }, "") + return s +} +func (this *PlantStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]Condition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "Condition", "Condition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&PlantStatus{`, + `Conditions:` + repeatedStringForConditions + `,`, + `ObservedGeneration:` + valueToStringGenerated(this.ObservedGeneration) + `,`, + `ClusterInfo:` + strings.Replace(this.ClusterInfo.String(), "ClusterInfo", "ClusterInfo", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Project) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Project{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ProjectSpec", "ProjectSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ProjectStatus", "ProjectStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ProjectList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Project{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Project", "Project", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ProjectList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ProjectMember) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ProjectMember{`, + `Subject:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Subject), "Subject", "v13.Subject", 1), `&`, ``, 1) + `,`, + `Role:` + fmt.Sprintf("%v", this.Role) + `,`, + `Roles:` + fmt.Sprintf("%v", this.Roles) + `,`, + `}`, + }, "") + return s +} +func (this *ProjectSpec) String() string { + if this == nil { + return "nil" + } 
+ repeatedStringForMembers := "[]ProjectMember{" + for _, f := range this.Members { + repeatedStringForMembers += strings.Replace(strings.Replace(f.String(), "ProjectMember", "ProjectMember", 1), `&`, ``, 1) + "," + } + repeatedStringForMembers += "}" + s := strings.Join([]string{`&ProjectSpec{`, + `CreatedBy:` + strings.Replace(fmt.Sprintf("%v", this.CreatedBy), "Subject", "v13.Subject", 1) + `,`, + `Description:` + valueToStringGenerated(this.Description) + `,`, + `Owner:` + strings.Replace(fmt.Sprintf("%v", this.Owner), "Subject", "v13.Subject", 1) + `,`, + `Purpose:` + valueToStringGenerated(this.Purpose) + `,`, + `Members:` + repeatedStringForMembers + `,`, + `Namespace:` + valueToStringGenerated(this.Namespace) + `,`, + `Tolerations:` + strings.Replace(this.Tolerations.String(), "ProjectTolerations", "ProjectTolerations", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ProjectStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ProjectStatus{`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, + `StaleSinceTimestamp:` + strings.Replace(fmt.Sprintf("%v", this.StaleSinceTimestamp), "Time", "v11.Time", 1) + `,`, + `StaleAutoDeleteTimestamp:` + strings.Replace(fmt.Sprintf("%v", this.StaleAutoDeleteTimestamp), "Time", "v11.Time", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ProjectTolerations) String() string { + if this == nil { + return "nil" + } + repeatedStringForDefaults := "[]Toleration{" + for _, f := range this.Defaults { + repeatedStringForDefaults += strings.Replace(strings.Replace(f.String(), "Toleration", "Toleration", 1), `&`, ``, 1) + "," + } + repeatedStringForDefaults += "}" + repeatedStringForWhitelist := "[]Toleration{" + for _, f := range this.Whitelist { + repeatedStringForWhitelist += strings.Replace(strings.Replace(f.String(), "Toleration", "Toleration", 1), `&`, ``, 1) + "," + } + repeatedStringForWhitelist += "}" + s := strings.Join([]string{`&ProjectTolerations{`, + `Defaults:` + repeatedStringForDefaults + `,`, + `Whitelist:` + repeatedStringForWhitelist + `,`, + `}`, + }, "") + return s +} +func (this *Provider) String() string { + if this == nil { + return "nil" + } + repeatedStringForWorkers := "[]Worker{" + for _, f := range this.Workers { + repeatedStringForWorkers += strings.Replace(strings.Replace(f.String(), "Worker", "Worker", 1), `&`, ``, 1) + "," + } + repeatedStringForWorkers += "}" + s := strings.Join([]string{`&Provider{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `ControlPlaneConfig:` + strings.Replace(fmt.Sprintf("%v", this.ControlPlaneConfig), "RawExtension", "runtime.RawExtension", 1) + `,`, + `InfrastructureConfig:` + strings.Replace(fmt.Sprintf("%v", this.InfrastructureConfig), "RawExtension", "runtime.RawExtension", 1) + `,`, + `Workers:` + repeatedStringForWorkers + `,`, + `}`, + }, "") + return s +} +func (this *Quota) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Quota{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "QuotaSpec", "QuotaSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *QuotaList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Quota{" + for _, f := range this.Items { + repeatedStringForItems += 
strings.Replace(strings.Replace(f.String(), "Quota", "Quota", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&QuotaList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *QuotaSpec) String() string { + if this == nil { + return "nil" + } + keysForMetrics := make([]string, 0, len(this.Metrics)) + for k := range this.Metrics { + keysForMetrics = append(keysForMetrics, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMetrics) + mapStringForMetrics := "k8s_io_api_core_v1.ResourceList{" + for _, k := range keysForMetrics { + mapStringForMetrics += fmt.Sprintf("%v: %v,", k, this.Metrics[k8s_io_api_core_v1.ResourceName(k)]) + } + mapStringForMetrics += "}" + s := strings.Join([]string{`&QuotaSpec{`, + `ClusterLifetimeDays:` + valueToStringGenerated(this.ClusterLifetimeDays) + `,`, + `Metrics:` + mapStringForMetrics + `,`, + `Scope:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Scope), "ObjectReference", "v1.ObjectReference", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Region) String() string { + if this == nil { + return "nil" + } + repeatedStringForZones := "[]AvailabilityZone{" + for _, f := range this.Zones { + repeatedStringForZones += strings.Replace(strings.Replace(f.String(), "AvailabilityZone", "AvailabilityZone", 1), `&`, ``, 1) + "," + } + repeatedStringForZones += "}" + keysForLabels := make([]string, 0, len(this.Labels)) + for k := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + s := strings.Join([]string{`&Region{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Zones:` + repeatedStringForZones + `,`, + `Labels:` + mapStringForLabels + `,`, + `}`, + }, "") + return s +} +func (this *ResourceData) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceData{`, + `CrossVersionObjectReference:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CrossVersionObjectReference), "CrossVersionObjectReference", "v12.CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, + `Data:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Data), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceWatchCacheSize) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceWatchCacheSize{`, + `APIGroup:` + valueToStringGenerated(this.APIGroup) + `,`, + `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, + `CacheSize:` + fmt.Sprintf("%v", this.CacheSize) + `,`, + `}`, + }, "") + return s +} +func (this *SecretBinding) String() string { + if this == nil { + return "nil" + } + repeatedStringForQuotas := "[]ObjectReference{" + for _, f := range this.Quotas { + repeatedStringForQuotas += fmt.Sprintf("%v", f) + "," + } + repeatedStringForQuotas += "}" + s := strings.Join([]string{`&SecretBinding{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, + `SecretRef:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretReference", 
"v1.SecretReference", 1), `&`, ``, 1) + `,`, + `Quotas:` + repeatedStringForQuotas + `,`, + `}`, + }, "") + return s +} +func (this *SecretBindingList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]SecretBinding{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "SecretBinding", "SecretBinding", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&SecretBindingList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *Seed) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Seed{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SeedSpec", "SeedSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SeedStatus", "SeedStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *SeedBackup) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SeedBackup{`, + `Provider:` + fmt.Sprintf("%v", this.Provider) + `,`, + `ProviderConfig:` + strings.Replace(fmt.Sprintf("%v", this.ProviderConfig), "RawExtension", "runtime.RawExtension", 1) + `,`, + `Region:` + valueToStringGenerated(this.Region) + `,`, + `SecretRef:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretReference", "v1.SecretReference", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *SeedDNS) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SeedDNS{`, + `IngressDomain:` + valueToStringGenerated(this.IngressDomain) + `,`, + `Provider:` + strings.Replace(this.Provider.String(), "SeedDNSProvider", "SeedDNSProvider", 1) + `,`, + `}`, + }, "") + return s +} +func (this *SeedDNSProvider) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SeedDNSProvider{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `SecretRef:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretReference", "v1.SecretReference", 1), `&`, ``, 1) + `,`, + `Domains:` + strings.Replace(this.Domains.String(), "DNSIncludeExclude", "DNSIncludeExclude", 1) + `,`, + `Zones:` + strings.Replace(this.Zones.String(), "DNSIncludeExclude", "DNSIncludeExclude", 1) + `,`, + `}`, + }, "") + return s +} +func (this *SeedList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Seed{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Seed", "Seed", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&SeedList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *SeedNetworks) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SeedNetworks{`, + `Nodes:` + valueToStringGenerated(this.Nodes) + `,`, + `Pods:` + fmt.Sprintf("%v", this.Pods) + `,`, + `Services:` + fmt.Sprintf("%v", this.Services) + `,`, + `ShootDefaults:` + 
strings.Replace(this.ShootDefaults.String(), "ShootNetworks", "ShootNetworks", 1) + `,`, + `}`, + }, "") + return s +} +func (this *SeedProvider) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SeedProvider{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `ProviderConfig:` + strings.Replace(fmt.Sprintf("%v", this.ProviderConfig), "RawExtension", "runtime.RawExtension", 1) + `,`, + `Region:` + fmt.Sprintf("%v", this.Region) + `,`, + `}`, + }, "") + return s +} +func (this *SeedSelector) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SeedSelector{`, + `LabelSelector:` + strings.Replace(fmt.Sprintf("%v", this.LabelSelector), "LabelSelector", "v11.LabelSelector", 1) + `,`, + `ProviderTypes:` + fmt.Sprintf("%v", this.ProviderTypes) + `,`, + `}`, + }, "") + return s +} +func (this *SeedSettingExcessCapacityReservation) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SeedSettingExcessCapacityReservation{`, + `Enabled:` + fmt.Sprintf("%v", this.Enabled) + `,`, + `}`, + }, "") + return s +} +func (this *SeedSettingLoadBalancerServices) String() string { + if this == nil { + return "nil" + } + keysForAnnotations := make([]string, 0, len(this.Annotations)) + for k := range this.Annotations { + keysForAnnotations = append(keysForAnnotations, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + mapStringForAnnotations := "map[string]string{" + for _, k := range keysForAnnotations { + mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k]) + } + mapStringForAnnotations += "}" + s := strings.Join([]string{`&SeedSettingLoadBalancerServices{`, + `Annotations:` + mapStringForAnnotations + `,`, + `}`, + }, "") + return s +} +func (this *SeedSettingScheduling) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SeedSettingScheduling{`, + `Visible:` + fmt.Sprintf("%v", this.Visible) + `,`, + `}`, + }, "") + return s +} +func (this *SeedSettingShootDNS) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SeedSettingShootDNS{`, + `Enabled:` + fmt.Sprintf("%v", this.Enabled) + `,`, + `}`, + }, "") + return s +} +func (this *SeedSettingVerticalPodAutoscaler) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SeedSettingVerticalPodAutoscaler{`, + `Enabled:` + fmt.Sprintf("%v", this.Enabled) + `,`, + `}`, + }, "") + return s +} +func (this *SeedSettings) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SeedSettings{`, + `ExcessCapacityReservation:` + strings.Replace(this.ExcessCapacityReservation.String(), "SeedSettingExcessCapacityReservation", "SeedSettingExcessCapacityReservation", 1) + `,`, + `Scheduling:` + strings.Replace(this.Scheduling.String(), "SeedSettingScheduling", "SeedSettingScheduling", 1) + `,`, + `ShootDNS:` + strings.Replace(this.ShootDNS.String(), "SeedSettingShootDNS", "SeedSettingShootDNS", 1) + `,`, + `LoadBalancerServices:` + strings.Replace(this.LoadBalancerServices.String(), "SeedSettingLoadBalancerServices", "SeedSettingLoadBalancerServices", 1) + `,`, + `VerticalPodAutoscaler:` + strings.Replace(this.VerticalPodAutoscaler.String(), "SeedSettingVerticalPodAutoscaler", "SeedSettingVerticalPodAutoscaler", 1) + `,`, + `}`, + }, "") + return s +} +func (this *SeedSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForTaints := "[]SeedTaint{" + for _, f := range 
this.Taints { + repeatedStringForTaints += strings.Replace(strings.Replace(f.String(), "SeedTaint", "SeedTaint", 1), `&`, ``, 1) + "," + } + repeatedStringForTaints += "}" + s := strings.Join([]string{`&SeedSpec{`, + `Backup:` + strings.Replace(this.Backup.String(), "SeedBackup", "SeedBackup", 1) + `,`, + `BlockCIDRs:` + fmt.Sprintf("%v", this.BlockCIDRs) + `,`, + `DNS:` + strings.Replace(strings.Replace(this.DNS.String(), "SeedDNS", "SeedDNS", 1), `&`, ``, 1) + `,`, + `Networks:` + strings.Replace(strings.Replace(this.Networks.String(), "SeedNetworks", "SeedNetworks", 1), `&`, ``, 1) + `,`, + `Provider:` + strings.Replace(strings.Replace(this.Provider.String(), "SeedProvider", "SeedProvider", 1), `&`, ``, 1) + `,`, + `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretReference", "v1.SecretReference", 1) + `,`, + `Taints:` + repeatedStringForTaints + `,`, + `Volume:` + strings.Replace(this.Volume.String(), "SeedVolume", "SeedVolume", 1) + `,`, + `Settings:` + strings.Replace(this.Settings.String(), "SeedSettings", "SeedSettings", 1) + `,`, + `Ingress:` + strings.Replace(this.Ingress.String(), "Ingress", "Ingress", 1) + `,`, + `}`, + }, "") + return s +} +func (this *SeedStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]Condition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "Condition", "Condition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&SeedStatus{`, + `Conditions:` + repeatedStringForConditions + `,`, + `Gardener:` + strings.Replace(this.Gardener.String(), "Gardener", "Gardener", 1) + `,`, + `KubernetesVersion:` + valueToStringGenerated(this.KubernetesVersion) + `,`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `ClusterIdentity:` + valueToStringGenerated(this.ClusterIdentity) + `,`, + `}`, + }, "") + return s +} +func (this *SeedTaint) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SeedTaint{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Value:` + valueToStringGenerated(this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *SeedVolume) String() string { + if this == nil { + return "nil" + } + repeatedStringForProviders := "[]SeedVolumeProvider{" + for _, f := range this.Providers { + repeatedStringForProviders += strings.Replace(strings.Replace(f.String(), "SeedVolumeProvider", "SeedVolumeProvider", 1), `&`, ``, 1) + "," + } + repeatedStringForProviders += "}" + s := strings.Join([]string{`&SeedVolume{`, + `MinimumSize:` + strings.Replace(fmt.Sprintf("%v", this.MinimumSize), "Quantity", "resource.Quantity", 1) + `,`, + `Providers:` + repeatedStringForProviders + `,`, + `}`, + }, "") + return s +} +func (this *SeedVolumeProvider) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SeedVolumeProvider{`, + `Purpose:` + fmt.Sprintf("%v", this.Purpose) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceAccountConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceAccountConfig{`, + `Issuer:` + valueToStringGenerated(this.Issuer) + `,`, + `SigningKeySecret:` + strings.Replace(fmt.Sprintf("%v", this.SigningKeySecret), "LocalObjectReference", "v1.LocalObjectReference", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Shoot) String() string { + if this == nil { + return "nil" + } + 
s := strings.Join([]string{`&Shoot{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ShootSpec", "ShootSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ShootStatus", "ShootStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ShootList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Shoot{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Shoot", "Shoot", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ShootList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ShootMachineImage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ShootMachineImage{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `ProviderConfig:` + strings.Replace(fmt.Sprintf("%v", this.ProviderConfig), "RawExtension", "runtime.RawExtension", 1) + `,`, + `Version:` + valueToStringGenerated(this.Version) + `,`, + `}`, + }, "") + return s +} +func (this *ShootNetworks) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ShootNetworks{`, + `Pods:` + valueToStringGenerated(this.Pods) + `,`, + `Services:` + valueToStringGenerated(this.Services) + `,`, + `}`, + }, "") + return s +} +func (this *ShootSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForExtensions := "[]Extension{" + for _, f := range this.Extensions { + repeatedStringForExtensions += strings.Replace(strings.Replace(f.String(), "Extension", "Extension", 1), `&`, ``, 1) + "," + } + repeatedStringForExtensions += "}" + repeatedStringForResources := "[]NamedResourceReference{" + for _, f := range this.Resources { + repeatedStringForResources += strings.Replace(strings.Replace(f.String(), "NamedResourceReference", "NamedResourceReference", 1), `&`, ``, 1) + "," + } + repeatedStringForResources += "}" + repeatedStringForTolerations := "[]Toleration{" + for _, f := range this.Tolerations { + repeatedStringForTolerations += strings.Replace(strings.Replace(f.String(), "Toleration", "Toleration", 1), `&`, ``, 1) + "," + } + repeatedStringForTolerations += "}" + s := strings.Join([]string{`&ShootSpec{`, + `Addons:` + strings.Replace(this.Addons.String(), "Addons", "Addons", 1) + `,`, + `CloudProfileName:` + fmt.Sprintf("%v", this.CloudProfileName) + `,`, + `DNS:` + strings.Replace(this.DNS.String(), "DNS", "DNS", 1) + `,`, + `Extensions:` + repeatedStringForExtensions + `,`, + `Hibernation:` + strings.Replace(this.Hibernation.String(), "Hibernation", "Hibernation", 1) + `,`, + `Kubernetes:` + strings.Replace(strings.Replace(this.Kubernetes.String(), "Kubernetes", "Kubernetes", 1), `&`, ``, 1) + `,`, + `Networking:` + strings.Replace(strings.Replace(this.Networking.String(), "Networking", "Networking", 1), `&`, ``, 1) + `,`, + `Maintenance:` + strings.Replace(this.Maintenance.String(), "Maintenance", "Maintenance", 1) + `,`, + `Monitoring:` + strings.Replace(this.Monitoring.String(), "Monitoring", "Monitoring", 1) + `,`, + `Provider:` + strings.Replace(strings.Replace(this.Provider.String(), "Provider", "Provider", 1), `&`, ``, 1) + `,`, + 
`Purpose:` + valueToStringGenerated(this.Purpose) + `,`, + `Region:` + fmt.Sprintf("%v", this.Region) + `,`, + `SecretBindingName:` + fmt.Sprintf("%v", this.SecretBindingName) + `,`, + `SeedName:` + valueToStringGenerated(this.SeedName) + `,`, + `SeedSelector:` + strings.Replace(this.SeedSelector.String(), "SeedSelector", "SeedSelector", 1) + `,`, + `Resources:` + repeatedStringForResources + `,`, + `Tolerations:` + repeatedStringForTolerations + `,`, + `}`, + }, "") + return s +} +func (this *ShootState) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ShootState{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ShootStateSpec", "ShootStateSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ShootStateList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ShootState{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ShootState", "ShootState", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ShootStateList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ShootStateSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForGardener := "[]GardenerResourceData{" + for _, f := range this.Gardener { + repeatedStringForGardener += strings.Replace(strings.Replace(f.String(), "GardenerResourceData", "GardenerResourceData", 1), `&`, ``, 1) + "," + } + repeatedStringForGardener += "}" + repeatedStringForExtensions := "[]ExtensionResourceState{" + for _, f := range this.Extensions { + repeatedStringForExtensions += strings.Replace(strings.Replace(f.String(), "ExtensionResourceState", "ExtensionResourceState", 1), `&`, ``, 1) + "," + } + repeatedStringForExtensions += "}" + repeatedStringForResources := "[]ResourceData{" + for _, f := range this.Resources { + repeatedStringForResources += strings.Replace(strings.Replace(f.String(), "ResourceData", "ResourceData", 1), `&`, ``, 1) + "," + } + repeatedStringForResources += "}" + s := strings.Join([]string{`&ShootStateSpec{`, + `Gardener:` + repeatedStringForGardener + `,`, + `Extensions:` + repeatedStringForExtensions + `,`, + `Resources:` + repeatedStringForResources + `,`, + `}`, + }, "") + return s +} +func (this *ShootStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]Condition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "Condition", "Condition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + repeatedStringForConstraints := "[]Condition{" + for _, f := range this.Constraints { + repeatedStringForConstraints += strings.Replace(strings.Replace(f.String(), "Condition", "Condition", 1), `&`, ``, 1) + "," + } + repeatedStringForConstraints += "}" + repeatedStringForLastErrors := "[]LastError{" + for _, f := range this.LastErrors { + repeatedStringForLastErrors += strings.Replace(strings.Replace(f.String(), "LastError", "LastError", 1), `&`, ``, 1) + "," + } + repeatedStringForLastErrors += "}" + s := strings.Join([]string{`&ShootStatus{`, + `Conditions:` + 
repeatedStringForConditions + `,`, + `Constraints:` + repeatedStringForConstraints + `,`, + `Gardener:` + strings.Replace(strings.Replace(this.Gardener.String(), "Gardener", "Gardener", 1), `&`, ``, 1) + `,`, + `IsHibernated:` + fmt.Sprintf("%v", this.IsHibernated) + `,`, + `LastOperation:` + strings.Replace(this.LastOperation.String(), "LastOperation", "LastOperation", 1) + `,`, + `LastError:` + strings.Replace(this.LastError.String(), "LastError", "LastError", 1) + `,`, + `LastErrors:` + repeatedStringForLastErrors + `,`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `RetryCycleStartTime:` + strings.Replace(fmt.Sprintf("%v", this.RetryCycleStartTime), "Time", "v11.Time", 1) + `,`, + `Seed:` + valueToStringGenerated(this.Seed) + `,`, + `TechnicalID:` + fmt.Sprintf("%v", this.TechnicalID) + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `ClusterIdentity:` + valueToStringGenerated(this.ClusterIdentity) + `,`, + `}`, + }, "") + return s +} +func (this *Toleration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Toleration{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Value:` + valueToStringGenerated(this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *VerticalPodAutoscaler) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VerticalPodAutoscaler{`, + `Enabled:` + fmt.Sprintf("%v", this.Enabled) + `,`, + `EvictAfterOOMThreshold:` + strings.Replace(fmt.Sprintf("%v", this.EvictAfterOOMThreshold), "Duration", "v11.Duration", 1) + `,`, + `EvictionRateBurst:` + valueToStringGenerated(this.EvictionRateBurst) + `,`, + `EvictionRateLimit:` + valueToStringGenerated(this.EvictionRateLimit) + `,`, + `EvictionTolerance:` + valueToStringGenerated(this.EvictionTolerance) + `,`, + `RecommendationMarginFraction:` + valueToStringGenerated(this.RecommendationMarginFraction) + `,`, + `UpdaterInterval:` + strings.Replace(fmt.Sprintf("%v", this.UpdaterInterval), "Duration", "v11.Duration", 1) + `,`, + `RecommenderInterval:` + strings.Replace(fmt.Sprintf("%v", this.RecommenderInterval), "Duration", "v11.Duration", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Volume) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Volume{`, + `Name:` + valueToStringGenerated(this.Name) + `,`, + `Type:` + valueToStringGenerated(this.Type) + `,`, + `VolumeSize:` + fmt.Sprintf("%v", this.VolumeSize) + `,`, + `Encrypted:` + valueToStringGenerated(this.Encrypted) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeType) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeType{`, + `Class:` + fmt.Sprintf("%v", this.Class) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Usable:` + valueToStringGenerated(this.Usable) + `,`, + `}`, + }, "") + return s +} +func (this *WatchCacheSizes) String() string { + if this == nil { + return "nil" + } + repeatedStringForResources := "[]ResourceWatchCacheSize{" + for _, f := range this.Resources { + repeatedStringForResources += strings.Replace(strings.Replace(f.String(), "ResourceWatchCacheSize", "ResourceWatchCacheSize", 1), `&`, ``, 1) + "," + } + repeatedStringForResources += "}" + s := strings.Join([]string{`&WatchCacheSizes{`, + `Default:` + valueToStringGenerated(this.Default) + `,`, + `Resources:` + repeatedStringForResources + `,`, + `}`, + }, "") + return s +} +func (this *Worker) String() string { + if this == nil { + return "nil" + } + repeatedStringForTaints := 
"[]Taint{" + for _, f := range this.Taints { + repeatedStringForTaints += fmt.Sprintf("%v", f) + "," + } + repeatedStringForTaints += "}" + repeatedStringForDataVolumes := "[]DataVolume{" + for _, f := range this.DataVolumes { + repeatedStringForDataVolumes += strings.Replace(strings.Replace(f.String(), "DataVolume", "DataVolume", 1), `&`, ``, 1) + "," + } + repeatedStringForDataVolumes += "}" + keysForAnnotations := make([]string, 0, len(this.Annotations)) + for k := range this.Annotations { + keysForAnnotations = append(keysForAnnotations, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + mapStringForAnnotations := "map[string]string{" + for _, k := range keysForAnnotations { + mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k]) + } + mapStringForAnnotations += "}" + keysForLabels := make([]string, 0, len(this.Labels)) + for k := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + s := strings.Join([]string{`&Worker{`, + `Annotations:` + mapStringForAnnotations + `,`, + `CABundle:` + valueToStringGenerated(this.CABundle) + `,`, + `CRI:` + strings.Replace(this.CRI.String(), "CRI", "CRI", 1) + `,`, + `Kubernetes:` + strings.Replace(this.Kubernetes.String(), "WorkerKubernetes", "WorkerKubernetes", 1) + `,`, + `Labels:` + mapStringForLabels + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Machine:` + strings.Replace(strings.Replace(this.Machine.String(), "Machine", "Machine", 1), `&`, ``, 1) + `,`, + `Maximum:` + fmt.Sprintf("%v", this.Maximum) + `,`, + `Minimum:` + fmt.Sprintf("%v", this.Minimum) + `,`, + `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "intstr.IntOrString", 1) + `,`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, + `ProviderConfig:` + strings.Replace(fmt.Sprintf("%v", this.ProviderConfig), "RawExtension", "runtime.RawExtension", 1) + `,`, + `Taints:` + repeatedStringForTaints + `,`, + `Volume:` + strings.Replace(this.Volume.String(), "Volume", "Volume", 1) + `,`, + `DataVolumes:` + repeatedStringForDataVolumes + `,`, + `KubeletDataVolumeName:` + valueToStringGenerated(this.KubeletDataVolumeName) + `,`, + `Zones:` + fmt.Sprintf("%v", this.Zones) + `,`, + `SystemComponents:` + strings.Replace(this.SystemComponents.String(), "WorkerSystemComponents", "WorkerSystemComponents", 1) + `,`, + `MachineControllerManagerSettings:` + strings.Replace(this.MachineControllerManagerSettings.String(), "MachineControllerManagerSettings", "MachineControllerManagerSettings", 1) + `,`, + `}`, + }, "") + return s +} +func (this *WorkerKubernetes) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WorkerKubernetes{`, + `Kubelet:` + strings.Replace(this.Kubelet.String(), "KubeletConfig", "KubeletConfig", 1) + `,`, + `}`, + }, "") + return s +} +func (this *WorkerSystemComponents) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WorkerSystemComponents{`, + `Allow:` + fmt.Sprintf("%v", this.Allow) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", 
pv) +} +func (m *Addon) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Addon: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Addon: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Enabled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Enabled = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Addons) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Addons: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Addons: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KubernetesDashboard", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.KubernetesDashboard == nil { + m.KubernetesDashboard = &KubernetesDashboard{} + } + if err := m.KubernetesDashboard.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NginxIngress", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NginxIngress == nil { + m.NginxIngress = &NginxIngress{} + } + if err := 
m.NginxIngress.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AdmissionPlugin) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AdmissionPlugin: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AdmissionPlugin: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Config == nil { + m.Config = &runtime.RawExtension{} + } + if err := m.Config.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Alerting) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Alerting: wiretype end group for non-group") + } + if fieldNum <= 0 { + 
return fmt.Errorf("proto: Alerting: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EmailReceivers", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EmailReceivers = append(m.EmailReceivers, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuditConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuditConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuditConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuditPolicy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuditPolicy == nil { + m.AuditPolicy = &AuditPolicy{} + } + if err := m.AuditPolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuditPolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuditPolicy: 
wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuditPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfigMapRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConfigMapRef == nil { + m.ConfigMapRef = &v1.ObjectReference{} + } + if err := m.ConfigMapRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AvailabilityZone) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AvailabilityZone: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AvailabilityZone: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UnavailableMachineTypes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UnavailableMachineTypes = append(m.UnavailableMachineTypes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
UnavailableVolumeTypes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UnavailableVolumeTypes = append(m.UnavailableVolumeTypes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BackupBucket) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BackupBucket: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BackupBucket: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if 
postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BackupBucketList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BackupBucketList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BackupBucketList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, BackupBucket{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BackupBucketProvider) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if 
wireType == 4 { + return fmt.Errorf("proto: BackupBucketProvider: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BackupBucketProvider: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Region", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Region = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BackupBucketSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BackupBucketSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BackupBucketSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Provider", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Provider.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProviderConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift 
+= 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ProviderConfig == nil { + m.ProviderConfig = &runtime.RawExtension{} + } + if err := m.ProviderConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Seed", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Seed = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BackupBucketStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BackupBucketStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BackupBucketStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProviderStatus", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + 
return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ProviderStatus == nil { + m.ProviderStatus = &runtime.RawExtension{} + } + if err := m.ProviderStatus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastOperation", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LastOperation == nil { + m.LastOperation = &LastOperation{} + } + if err := m.LastOperation.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastError", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LastError == nil { + m.LastError = &LastError{} + } + if err := m.LastError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ObservedGeneration |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GeneratedSecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GeneratedSecretRef == nil { + m.GeneratedSecretRef = &v1.SecretReference{} + } + if err := m.GeneratedSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BackupEntry) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift 
>= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BackupEntry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BackupEntry: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BackupEntryList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BackupEntryList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BackupEntryList: illegal tag %d (wire type %d)", 
fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, BackupEntry{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BackupEntrySpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BackupEntrySpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BackupEntrySpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BucketName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BucketName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Seed", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Seed = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BackupEntryStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BackupEntryStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BackupEntryStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastOperation", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LastOperation == nil { + m.LastOperation = &LastOperation{} + } + if err := m.LastOperation.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastError", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LastError == nil { + m.LastError = &LastError{} + } + if err := m.LastError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ObservedGeneration |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 
0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CRI) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CRI: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CRI: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = CRIName(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerRuntimes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerRuntimes = append(m.ContainerRuntimes, ContainerRuntime{}) + if err := m.ContainerRuntimes[len(m.ContainerRuntimes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CloudInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CloudInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CloudInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Region", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Region = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CloudProfile) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CloudProfile: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CloudProfile: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := 
m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CloudProfileList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CloudProfileList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CloudProfileList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, CloudProfile{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CloudProfileSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CloudProfileSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + 
return fmt.Errorf("proto: CloudProfileSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CABundle", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.CABundle = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kubernetes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Kubernetes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MachineImages", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MachineImages = append(m.MachineImages, MachineImage{}) + if err := m.MachineImages[len(m.MachineImages)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MachineTypes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MachineTypes = append(m.MachineTypes, MachineType{}) + if err := m.MachineTypes[len(m.MachineTypes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProviderConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ProviderConfig == nil { + m.ProviderConfig = &runtime.RawExtension{} + } + if err := m.ProviderConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Regions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Regions = append(m.Regions, Region{}) + if err := m.Regions[len(m.Regions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SeedSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SeedSelector == nil { + m.SeedSelector = &SeedSelector{} + } + if err := m.SeedSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeTypes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeTypes = append(m.VolumeTypes, VolumeType{}) + if err := m.VolumeTypes[len(m.VolumeTypes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l 
{ + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterAutoscaler) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterAutoscaler: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterAutoscaler: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ScaleDownDelayAfterAdd", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ScaleDownDelayAfterAdd == nil { + m.ScaleDownDelayAfterAdd = &v11.Duration{} + } + if err := m.ScaleDownDelayAfterAdd.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ScaleDownDelayAfterDelete", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ScaleDownDelayAfterDelete == nil { + m.ScaleDownDelayAfterDelete = &v11.Duration{} + } + if err := m.ScaleDownDelayAfterDelete.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ScaleDownDelayAfterFailure", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ScaleDownDelayAfterFailure == nil { + m.ScaleDownDelayAfterFailure = &v11.Duration{} + } + if err := m.ScaleDownDelayAfterFailure.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ScaleDownUnneededTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen 
< 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ScaleDownUnneededTime == nil { + m.ScaleDownUnneededTime = &v11.Duration{} + } + if err := m.ScaleDownUnneededTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field ScaleDownUtilizationThreshold", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.ScaleDownUtilizationThreshold = &v2 + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ScanInterval", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ScanInterval == nil { + m.ScanInterval = &v11.Duration{} + } + if err := m.ScanInterval.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cloud", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Cloud.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kubernetes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Kubernetes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Condition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Condition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Condition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = ConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastUpdateTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Codes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Codes = append(m.Codes, ErrorCode(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerRuntime) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := 
int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerRuntime: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerRuntime: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProviderConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ProviderConfig == nil { + m.ProviderConfig = &runtime.RawExtension{} + } + if err := m.ProviderConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ControllerDeployment) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ControllerDeployment: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ControllerDeployment: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 
2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProviderConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ProviderConfig == nil { + m.ProviderConfig = &runtime.RawExtension{} + } + if err := m.ProviderConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Policy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := ControllerDeploymentPolicy(dAtA[iNdEx:postIndex]) + m.Policy = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SeedSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SeedSelector == nil { + m.SeedSelector = &v11.LabelSelector{} + } + if err := m.SeedSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ControllerInstallation) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ControllerInstallation: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ControllerInstallation: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + 
if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ControllerInstallationList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ControllerInstallationList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ControllerInstallationList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + 
return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ControllerInstallation{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ControllerInstallationSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ControllerInstallationSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ControllerInstallationSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RegistrationRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.RegistrationRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SeedRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.SeedRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ControllerInstallationStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ControllerInstallationStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ControllerInstallationStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, Condition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProviderStatus", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ProviderStatus == nil { + m.ProviderStatus = &runtime.RawExtension{} + } + if err := m.ProviderStatus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ControllerRegistration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ControllerRegistration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ControllerRegistration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", 
wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ControllerRegistrationList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ControllerRegistrationList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ControllerRegistrationList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + 
} + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ControllerRegistration{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ControllerRegistrationSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ControllerRegistrationSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ControllerRegistrationSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, ControllerResource{}) + if err := m.Resources[len(m.Resources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Deployment", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Deployment == nil { + m.Deployment = &ControllerDeployment{} + } + if err := m.Deployment.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ControllerResource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ControllerResource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ControllerResource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field GloballyEnabled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.GloballyEnabled = &b + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReconcileTimeout", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ReconcileTimeout == nil { + m.ReconcileTimeout = &v11.Duration{} + } + if err := m.ReconcileTimeout.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Primary", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Primary = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return 
ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DNS) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DNS: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DNS: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Domain", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Domain = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Providers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Providers = append(m.Providers, DNSProvider{}) + if err := m.Providers[len(m.Providers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DNSIncludeExclude) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DNSIncludeExclude: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DNSIncludeExclude: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Include", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Include = append(m.Include, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exclude", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Exclude = append(m.Exclude, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DNSProvider) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DNSProvider: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DNSProvider: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Domains", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Domains == nil { + m.Domains = &DNSIncludeExclude{} + } + if err := m.Domains.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Primary", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Primary = &b + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretName", 
wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.SecretName = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Type = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Zones", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Zones == nil { + m.Zones = &DNSIncludeExclude{} + } + if err := m.Zones.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DataVolume) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DataVolume: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DataVolume: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Type = &s + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeSize", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeSize = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Encrypted", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Encrypted = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Endpoint) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Endpoint: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Endpoint: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.URL = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Purpose", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Purpose = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExpirableVersion) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExpirableVersion: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExpirableVersion: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExpirationDate", wireType) + } + var msglen int + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ExpirationDate == nil { + m.ExpirationDate = &v11.Time{} + } + if err := m.ExpirationDate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Classification", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := VersionClassification(dAtA[iNdEx:postIndex]) + m.Classification = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Extension) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Extension: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Extension: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProviderConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 
0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ProviderConfig == nil { + m.ProviderConfig = &runtime.RawExtension{} + } + if err := m.ProviderConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Disabled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Disabled = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExtensionResourceState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExtensionResourceState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExtensionResourceState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Name = &s + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Purpose", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Purpose = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.State == nil { + m.State = &runtime.RawExtension{} + } + if err := m.State.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, v1beta1.NamedResourceReference{}) + if err := m.Resources[len(m.Resources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Gardener) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Gardener: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Gardener: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = 
postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GardenerResourceData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GardenerResourceData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GardenerResourceData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Data.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Hibernation) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Hibernation: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Hibernation: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Enabled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Enabled = &b + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Schedules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Schedules = append(m.Schedules, HibernationSchedule{}) + if err := m.Schedules[len(m.Schedules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return 
io.ErrUnexpectedEOF + } + return nil +} +func (m *HibernationSchedule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HibernationSchedule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HibernationSchedule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Start = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field End", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.End = &s + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Location", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Location = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HorizontalPodAutoscalerConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HorizontalPodAutoscalerConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HorizontalPodAutoscalerConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CPUInitializationPeriod", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CPUInitializationPeriod == nil { + m.CPUInitializationPeriod = &v11.Duration{} + } + if err := m.CPUInitializationPeriod.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DownscaleDelay", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DownscaleDelay == nil { + m.DownscaleDelay = &v11.Duration{} + } + if err := m.DownscaleDelay.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DownscaleStabilization", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DownscaleStabilization == nil { + m.DownscaleStabilization = &v11.Duration{} + } + if err := m.DownscaleStabilization.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InitialReadinessDelay", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.InitialReadinessDelay == nil { + m.InitialReadinessDelay = &v11.Duration{} + } + if err := m.InitialReadinessDelay.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex 
+ case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SyncPeriod", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SyncPeriod == nil { + m.SyncPeriod = &v11.Duration{} + } + if err := m.SyncPeriod.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Tolerance", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.Tolerance = &v2 + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpscaleDelay", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.UpscaleDelay == nil { + m.UpscaleDelay = &v11.Duration{} + } + if err := m.UpscaleDelay.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Ingress) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Ingress: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Ingress: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Domain", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Domain = string(dAtA[iNdEx:postIndex]) + 
iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Controller", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Controller.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressController) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressController: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressController: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProviderConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ProviderConfig == nil { + m.ProviderConfig = &runtime.RawExtension{} + } + if err := m.ProviderConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy 
+ } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *KubeAPIServerConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KubeAPIServerConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KubeAPIServerConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KubernetesConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.KubernetesConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AdmissionPlugins", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AdmissionPlugins = append(m.AdmissionPlugins, AdmissionPlugin{}) + if err := m.AdmissionPlugins[len(m.AdmissionPlugins)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIAudiences", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIAudiences = append(m.APIAudiences, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuditConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if m.AuditConfig == nil { + m.AuditConfig = &AuditConfig{} + } + if err := m.AuditConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EnableBasicAuthentication", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.EnableBasicAuthentication = &b + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OIDCConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.OIDCConfig == nil { + m.OIDCConfig = &OIDCConfig{} + } + if err := m.OIDCConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RuntimeConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RuntimeConfig == nil { + m.RuntimeConfig = make(map[string]bool) + } + var mapkey string + var mapvalue bool + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapvaluetemp int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapvaluetemp |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + mapvalue = bool(mapvaluetemp != 0) + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + 
if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.RuntimeConfig[mapkey] = mapvalue + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccountConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ServiceAccountConfig == nil { + m.ServiceAccountConfig = &ServiceAccountConfig{} + } + if err := m.ServiceAccountConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WatchCacheSizes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.WatchCacheSizes == nil { + m.WatchCacheSizes = &WatchCacheSizes{} + } + if err := m.WatchCacheSizes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Requests == nil { + m.Requests = &KubeAPIServerRequests{} + } + if err := m.Requests.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *KubeAPIServerRequests) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KubeAPIServerRequests: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KubeAPIServerRequests: illegal 
tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxNonMutatingInflight", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.MaxNonMutatingInflight = &v + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxMutatingInflight", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.MaxMutatingInflight = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *KubeControllerManagerConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KubeControllerManagerConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KubeControllerManagerConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KubernetesConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.KubernetesConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HorizontalPodAutoscalerConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.HorizontalPodAutoscalerConfig == nil { + m.HorizontalPodAutoscalerConfig = &HorizontalPodAutoscalerConfig{} + } + if err := m.HorizontalPodAutoscalerConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType 
!= 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeCIDRMaskSize", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.NodeCIDRMaskSize = &v + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodEvictionTimeout", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PodEvictionTimeout == nil { + m.PodEvictionTimeout = &v11.Duration{} + } + if err := m.PodEvictionTimeout.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *KubeProxyConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KubeProxyConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KubeProxyConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KubernetesConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.KubernetesConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + 
s := ProxyMode(dAtA[iNdEx:postIndex]) + m.Mode = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *KubeSchedulerConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KubeSchedulerConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KubeSchedulerConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KubernetesConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.KubernetesConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KubeMaxPDVols", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.KubeMaxPDVols = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *KubeletConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KubeletConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return 
fmt.Errorf("proto: KubeletConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KubernetesConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.KubernetesConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CPUCFSQuota", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.CPUCFSQuota = &b + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CPUManagerPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.CPUManagerPolicy = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EvictionHard", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.EvictionHard == nil { + m.EvictionHard = &KubeletConfigEviction{} + } + if err := m.EvictionHard.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EvictionMaxPodGracePeriod", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.EvictionMaxPodGracePeriod = &v + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EvictionMinimumReclaim", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + 
postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.EvictionMinimumReclaim == nil { + m.EvictionMinimumReclaim = &KubeletConfigEvictionMinimumReclaim{} + } + if err := m.EvictionMinimumReclaim.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EvictionPressureTransitionPeriod", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.EvictionPressureTransitionPeriod == nil { + m.EvictionPressureTransitionPeriod = &v11.Duration{} + } + if err := m.EvictionPressureTransitionPeriod.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EvictionSoft", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.EvictionSoft == nil { + m.EvictionSoft = &KubeletConfigEviction{} + } + if err := m.EvictionSoft.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EvictionSoftGracePeriod", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.EvictionSoftGracePeriod == nil { + m.EvictionSoftGracePeriod = &KubeletConfigEvictionSoftGracePeriod{} + } + if err := m.EvictionSoftGracePeriod.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxPods", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.MaxPods = &v + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PodPIDsLimit", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
m.PodPIDsLimit = &v + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImagePullProgressDeadline", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ImagePullProgressDeadline == nil { + m.ImagePullProgressDeadline = &v11.Duration{} + } + if err := m.ImagePullProgressDeadline.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FailSwapOn", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.FailSwapOn = &b + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KubeReserved", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.KubeReserved == nil { + m.KubeReserved = &KubeletConfigReserved{} + } + if err := m.KubeReserved.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SystemReserved", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SystemReserved == nil { + m.SystemReserved = &KubeletConfigReserved{} + } + if err := m.SystemReserved.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *KubeletConfigEviction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KubeletConfigEviction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KubeletConfigEviction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MemoryAvailable", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.MemoryAvailable = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImageFSAvailable", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.ImageFSAvailable = &s + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImageFSInodesFree", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.ImageFSInodesFree = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeFSAvailable", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.NodeFSAvailable = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeFSInodesFree", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + 
if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.NodeFSInodesFree = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *KubeletConfigEvictionMinimumReclaim) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KubeletConfigEvictionMinimumReclaim: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KubeletConfigEvictionMinimumReclaim: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MemoryAvailable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MemoryAvailable == nil { + m.MemoryAvailable = &resource.Quantity{} + } + if err := m.MemoryAvailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImageFSAvailable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ImageFSAvailable == nil { + m.ImageFSAvailable = &resource.Quantity{} + } + if err := m.ImageFSAvailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImageFSInodesFree", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ImageFSInodesFree == nil { + m.ImageFSInodesFree = &resource.Quantity{} + } + if err := m.ImageFSInodesFree.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeFSAvailable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeFSAvailable == nil { + m.NodeFSAvailable = &resource.Quantity{} + } + if err := m.NodeFSAvailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeFSInodesFree", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeFSInodesFree == nil { + m.NodeFSInodesFree = &resource.Quantity{} + } + if err := m.NodeFSInodesFree.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *KubeletConfigEvictionSoftGracePeriod) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KubeletConfigEvictionSoftGracePeriod: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KubeletConfigEvictionSoftGracePeriod: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MemoryAvailable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + 
if m.MemoryAvailable == nil { + m.MemoryAvailable = &v11.Duration{} + } + if err := m.MemoryAvailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImageFSAvailable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ImageFSAvailable == nil { + m.ImageFSAvailable = &v11.Duration{} + } + if err := m.ImageFSAvailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImageFSInodesFree", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ImageFSInodesFree == nil { + m.ImageFSInodesFree = &v11.Duration{} + } + if err := m.ImageFSInodesFree.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeFSAvailable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeFSAvailable == nil { + m.NodeFSAvailable = &v11.Duration{} + } + if err := m.NodeFSAvailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeFSInodesFree", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeFSInodesFree == nil { + m.NodeFSInodesFree = &v11.Duration{} + } + if err := m.NodeFSInodesFree.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return 
io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *KubeletConfigReserved) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KubeletConfigReserved: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KubeletConfigReserved: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CPU", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CPU == nil { + m.CPU = &resource.Quantity{} + } + if err := m.CPU.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Memory", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Memory == nil { + m.Memory = &resource.Quantity{} + } + if err := m.Memory.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EphemeralStorage", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.EphemeralStorage == nil { + m.EphemeralStorage = &resource.Quantity{} + } + if err := m.EphemeralStorage.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PID == nil { + m.PID = &resource.Quantity{} + } + if err := m.PID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Kubernetes) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Kubernetes: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Kubernetes: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowPrivilegedContainers", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.AllowPrivilegedContainers = &b + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterAutoscaler", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ClusterAutoscaler == nil { + m.ClusterAutoscaler = &ClusterAutoscaler{} + } + if err := m.ClusterAutoscaler.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KubeAPIServer", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.KubeAPIServer == nil { + m.KubeAPIServer = &KubeAPIServerConfig{} + } + if err := m.KubeAPIServer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KubeControllerManager", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.KubeControllerManager == nil { + m.KubeControllerManager = &KubeControllerManagerConfig{} + } + if err := m.KubeControllerManager.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KubeScheduler", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.KubeScheduler == nil { + m.KubeScheduler = &KubeSchedulerConfig{} + } + if err := m.KubeScheduler.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KubeProxy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.KubeProxy == nil { + m.KubeProxy = &KubeProxyConfig{} + } + if err := m.KubeProxy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kubelet", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Kubelet == nil { + m.Kubelet = &KubeletConfig{} + } + if err := m.Kubelet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType 
= %d for field VerticalPodAutoscaler", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.VerticalPodAutoscaler == nil { + m.VerticalPodAutoscaler = &VerticalPodAutoscaler{} + } + if err := m.VerticalPodAutoscaler.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *KubernetesConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KubernetesConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KubernetesConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FeatureGates", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FeatureGates == nil { + m.FeatureGates = make(map[string]bool) + } + var mapkey string + var mapvalue bool + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 
2 { + var mapvaluetemp int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapvaluetemp |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + mapvalue = bool(mapvaluetemp != 0) + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.FeatureGates[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *KubernetesDashboard) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KubernetesDashboard: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KubernetesDashboard: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthenticationMode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.AuthenticationMode = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addon", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Addon.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *KubernetesInfo) Unmarshal(dAtA 
[]byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KubernetesInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KubernetesInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *KubernetesSettings) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KubernetesSettings: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KubernetesSettings: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Versions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Versions = append(m.Versions, ExpirableVersion{}) + if err := m.Versions[len(m.Versions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + 
return nil +} +func (m *LastError) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LastError: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LastError: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Description = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TaskID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.TaskID = &s + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Codes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Codes = append(m.Codes, ErrorCode(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LastUpdateTime == nil { + m.LastUpdateTime = &v11.Time{} + } + if err := m.LastUpdateTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + 
default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LastOperation) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LastOperation: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LastOperation: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Description = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastUpdateTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Progress", wireType) + } + m.Progress = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Progress |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.State = LastOperationState(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = LastOperationType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Machine) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Machine: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Machine: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Image == nil { + m.Image = &ShootMachineImage{} + } + if err := m.Image.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m 
*MachineControllerManagerSettings) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MachineControllerManagerSettings: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MachineControllerManagerSettings: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MachineDrainTimeout", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MachineDrainTimeout == nil { + m.MachineDrainTimeout = &v11.Duration{} + } + if err := m.MachineDrainTimeout.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MachineHealthTimeout", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MachineHealthTimeout == nil { + m.MachineHealthTimeout = &v11.Duration{} + } + if err := m.MachineHealthTimeout.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MachineCreationTimeout", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MachineCreationTimeout == nil { + m.MachineCreationTimeout = &v11.Duration{} + } + if err := m.MachineCreationTimeout.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxEvictRetries", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.MaxEvictRetries = &v + case 5: + if wireType != 2 { + return fmt.Errorf("proto: 
wrong wireType = %d for field NodeConditions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeConditions = append(m.NodeConditions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MachineImage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MachineImage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MachineImage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Versions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Versions = append(m.Versions, MachineImageVersion{}) + if err := m.Versions[len(m.Versions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + 
} + return nil +} +func (m *MachineImageVersion) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MachineImageVersion: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MachineImageVersion: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExpirableVersion", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ExpirableVersion.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CRI", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CRI = append(m.CRI, CRI{}) + if err := m.CRI[len(m.CRI)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MachineType) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MachineType: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MachineType: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CPU", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + 
} + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.CPU.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GPU", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.GPU.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Memory", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Memory.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Storage", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Storage == nil { + m.Storage = &MachineTypeStorage{} + } + if err := m.Storage.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Usable", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Usable = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if 
err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MachineTypeStorage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MachineTypeStorage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MachineTypeStorage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Class", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Class = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StorageSize", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.StorageSize.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Maintenance) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l 
{ + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Maintenance: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Maintenance: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AutoUpdate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AutoUpdate == nil { + m.AutoUpdate = &MaintenanceAutoUpdate{} + } + if err := m.AutoUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeWindow", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TimeWindow == nil { + m.TimeWindow = &MaintenanceTimeWindow{} + } + if err := m.TimeWindow.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfineSpecUpdateRollout", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.ConfineSpecUpdateRollout = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MaintenanceAutoUpdate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MaintenanceAutoUpdate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MaintenanceAutoUpdate: illegal 
tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field KubernetesVersion", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.KubernetesVersion = bool(v != 0) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MachineImageVersion", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.MachineImageVersion = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MaintenanceTimeWindow) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MaintenanceTimeWindow: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MaintenanceTimeWindow: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Begin", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Begin = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field End", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.End = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return 
ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Monitoring) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Monitoring: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Monitoring: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Alerting", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Alerting == nil { + m.Alerting = &Alerting{} + } + if err := m.Alerting.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NamedResourceReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NamedResourceReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NamedResourceReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + 
if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ResourceRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Networking) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Networking: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Networking: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProviderConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ProviderConfig == nil { + m.ProviderConfig = &runtime.RawExtension{} + } + if err := m.ProviderConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pods", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if 
postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Pods = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Nodes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Nodes = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Services", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Services = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NginxIngress) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NginxIngress: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NginxIngress: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancerSourceRanges", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LoadBalancerSourceRanges = append(m.LoadBalancerSourceRanges, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift 
:= uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Config == nil { + m.Config = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Config[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExternalTrafficPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := k8s_io_api_core_v1.ServiceExternalTrafficPolicyType(dAtA[iNdEx:postIndex]) + m.ExternalTrafficPolicy = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addon", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + 
if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Addon.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OIDCConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OIDCConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OIDCConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CABundle", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.CABundle = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientAuthentication", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ClientAuthentication == nil { + m.ClientAuthentication = &OpenIDConnectClientAuthentication{} + } + if err := m.ClientAuthentication.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated 
+ } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.ClientID = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GroupsClaim", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.GroupsClaim = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GroupsPrefix", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.GroupsPrefix = &s + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IssuerURL", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.IssuerURL = &s + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequiredClaims", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RequiredClaims == nil { + m.RequiredClaims = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx 
>= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.RequiredClaims[mapkey] = mapvalue + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SigningAlgs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SigningAlgs = append(m.SigningAlgs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UsernameClaim", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.UsernameClaim = &s + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UsernamePrefix", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if 
postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.UsernamePrefix = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OpenIDConnectClientAuthentication) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OpenIDConnectClientAuthentication: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OpenIDConnectClientAuthentication: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExtraConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ExtraConfig == nil { + m.ExtraConfig = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if 
postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.ExtraConfig[mapkey] = mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Secret = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Plant) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Plant: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Plant: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if 
err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PlantList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PlantList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PlantList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Plant{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } 
+ } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PlantSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PlantSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PlantSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Endpoints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Endpoints = append(m.Endpoints, Endpoint{}) + if err := m.Endpoints[len(m.Endpoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PlantStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PlantStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PlantStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, Condition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ObservedGeneration = &v + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ClusterInfo == nil { + m.ClusterInfo = &ClusterInfo{} + } + if err := m.ClusterInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Project) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Project: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Project: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + 
} + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ProjectList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProjectList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProjectList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Project{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err 
!= nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ProjectMember) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProjectMember: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProjectMember: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subject", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Subject.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Role = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF 
+ } + return nil +} +func (m *ProjectSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProjectSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProjectSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CreatedBy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CreatedBy == nil { + m.CreatedBy = &v13.Subject{} + } + if err := m.CreatedBy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Description = &s + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Owner == nil { + m.Owner = &v13.Subject{} + } + if err := m.Owner.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Purpose", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Purpose = 
&s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Members = append(m.Members, ProjectMember{}) + if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Namespace = &s + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tolerations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Tolerations == nil { + m.Tolerations = &ProjectTolerations{} + } + if err := m.Tolerations.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ProjectStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProjectStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProjectStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ObservedGeneration |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Phase = ProjectPhase(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StaleSinceTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.StaleSinceTimestamp == nil { + m.StaleSinceTimestamp = &v11.Time{} + } + if err := m.StaleSinceTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StaleAutoDeleteTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.StaleAutoDeleteTimestamp == nil { + m.StaleAutoDeleteTimestamp = &v11.Time{} + } + if err := m.StaleAutoDeleteTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ProjectTolerations) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProjectTolerations: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProjectTolerations: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Defaults", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Defaults = append(m.Defaults, Toleration{}) + if err := m.Defaults[len(m.Defaults)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Whitelist", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Whitelist = append(m.Whitelist, Toleration{}) + if err := m.Whitelist[len(m.Whitelist)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Provider) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Provider: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Provider: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ControlPlaneConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if 
b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ControlPlaneConfig == nil { + m.ControlPlaneConfig = &runtime.RawExtension{} + } + if err := m.ControlPlaneConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InfrastructureConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.InfrastructureConfig == nil { + m.InfrastructureConfig = &runtime.RawExtension{} + } + if err := m.InfrastructureConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Workers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Workers = append(m.Workers, Worker{}) + if err := m.Workers[len(m.Workers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Quota) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Quota: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Quota: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { 
+ return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QuotaList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QuotaList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QuotaList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Quota{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return 
io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QuotaSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QuotaSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QuotaSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterLifetimeDays", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ClusterLifetimeDays = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metrics", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Metrics == nil { + m.Metrics = make(k8s_io_api_core_v1.ResourceList) + } + var mapkey k8s_io_api_core_v1.ResourceName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := 
mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Metrics[k8s_io_api_core_v1.ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Scope.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Region) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Region: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Region: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Zones", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Zones = append(m.Zones, AvailabilityZone{}) + if err := m.Zones[len(m.Zones)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != 
nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + 
break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CrossVersionObjectReference", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.CrossVersionObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Data.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceWatchCacheSize) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceWatchCacheSize: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceWatchCacheSize: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.APIGroup = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CacheSize", wireType) + } + m.CacheSize = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CacheSize |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SecretBinding) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SecretBinding: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SecretBinding: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for 
field Quotas", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Quotas = append(m.Quotas, v1.ObjectReference{}) + if err := m.Quotas[len(m.Quotas)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SecretBindingList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SecretBindingList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SecretBindingList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, SecretBinding{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func 
(m *Seed) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Seed: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Seed: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SeedBackup) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return 
fmt.Errorf("proto: SeedBackup: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SeedBackup: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Provider", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Provider = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProviderConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ProviderConfig == nil { + m.ProviderConfig = &runtime.RawExtension{} + } + if err := m.ProviderConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Region", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Region = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SeedDNS) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := 
iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SeedDNS: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SeedDNS: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IngressDomain", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.IngressDomain = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Provider", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Provider == nil { + m.Provider = &SeedDNSProvider{} + } + if err := m.Provider.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SeedDNSProvider) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SeedDNSProvider: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SeedDNSProvider: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Domains", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Domains == nil { + m.Domains = &DNSIncludeExclude{} + } + if err := m.Domains.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Zones", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Zones == nil { + m.Zones = &DNSIncludeExclude{} + } + if err := m.Zones.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SeedList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SeedList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SeedList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", 
wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Seed{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SeedNetworks) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SeedNetworks: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SeedNetworks: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Nodes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Nodes = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pods", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + 
postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Pods = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Services", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Services = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ShootDefaults", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ShootDefaults == nil { + m.ShootDefaults = &ShootNetworks{} + } + if err := m.ShootDefaults.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SeedProvider) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SeedProvider: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SeedProvider: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProviderConfig", wireType) + } + var msglen int 
+ for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ProviderConfig == nil { + m.ProviderConfig = &runtime.RawExtension{} + } + if err := m.ProviderConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Region", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Region = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SeedSelector) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SeedSelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SeedSelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LabelSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LabelSelector == nil { + m.LabelSelector = &v11.LabelSelector{} + } + if err := m.LabelSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProviderTypes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProviderTypes = append(m.ProviderTypes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SeedSettingExcessCapacityReservation) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SeedSettingExcessCapacityReservation: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SeedSettingExcessCapacityReservation: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Enabled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Enabled = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SeedSettingLoadBalancerServices) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SeedSettingLoadBalancerServices: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SeedSettingLoadBalancerServices: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if m.Annotations == nil { + m.Annotations = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Annotations[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SeedSettingScheduling) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SeedSettingScheduling: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SeedSettingScheduling: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Visible", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { 
+ return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Visible = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SeedSettingShootDNS) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SeedSettingShootDNS: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SeedSettingShootDNS: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Enabled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Enabled = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SeedSettingVerticalPodAutoscaler) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SeedSettingVerticalPodAutoscaler: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SeedSettingVerticalPodAutoscaler: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Enabled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Enabled = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SeedSettings) 
Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SeedSettings: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SeedSettings: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExcessCapacityReservation", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ExcessCapacityReservation == nil { + m.ExcessCapacityReservation = &SeedSettingExcessCapacityReservation{} + } + if err := m.ExcessCapacityReservation.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scheduling", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Scheduling == nil { + m.Scheduling = &SeedSettingScheduling{} + } + if err := m.Scheduling.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ShootDNS", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ShootDNS == nil { + m.ShootDNS = &SeedSettingShootDNS{} + } + if err := m.ShootDNS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancerServices", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if m.LoadBalancerServices == nil { + m.LoadBalancerServices = &SeedSettingLoadBalancerServices{} + } + if err := m.LoadBalancerServices.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VerticalPodAutoscaler", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.VerticalPodAutoscaler == nil { + m.VerticalPodAutoscaler = &SeedSettingVerticalPodAutoscaler{} + } + if err := m.VerticalPodAutoscaler.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SeedSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SeedSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SeedSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Backup", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Backup == nil { + m.Backup = &SeedBackup{} + } + if err := m.Backup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockCIDRs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BlockCIDRs = append(m.BlockCIDRs, string(dAtA[iNdEx:postIndex])) + iNdEx = 
postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DNS", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.DNS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Networks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Networks.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Provider", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Provider.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretRef == nil { + m.SecretRef = &v1.SecretReference{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Taints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Taints = append(m.Taints, SeedTaint{}) + if err := m.Taints[len(m.Taints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + 
return fmt.Errorf("proto: wrong wireType = %d for field Volume", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Volume == nil { + m.Volume = &SeedVolume{} + } + if err := m.Volume.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Settings", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Settings == nil { + m.Settings = &SeedSettings{} + } + if err := m.Settings.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Ingress == nil { + m.Ingress = &Ingress{} + } + if err := m.Ingress.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SeedStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SeedStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SeedStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= 
int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, Condition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Gardener", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Gardener == nil { + m.Gardener = &Gardener{} + } + if err := m.Gardener.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KubernetesVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.KubernetesVersion = &s + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ObservedGeneration |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterIdentity", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.ClusterIdentity = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SeedTaint) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SeedTaint: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SeedTaint: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Value = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SeedVolume) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SeedVolume: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SeedVolume: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MinimumSize", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if 
m.MinimumSize == nil { + m.MinimumSize = &resource.Quantity{} + } + if err := m.MinimumSize.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Providers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Providers = append(m.Providers, SeedVolumeProvider{}) + if err := m.Providers[len(m.Providers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SeedVolumeProvider) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SeedVolumeProvider: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SeedVolumeProvider: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Purpose", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Purpose = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return 
ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceAccountConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceAccountConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceAccountConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Issuer", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Issuer = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SigningKeySecret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SigningKeySecret == nil { + m.SigningKeySecret = &v1.LocalObjectReference{} + } + if err := m.SigningKeySecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Shoot) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Shoot: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Shoot: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ShootList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ShootList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ShootList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Shoot{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ShootMachineImage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ShootMachineImage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ShootMachineImage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProviderConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ProviderConfig == nil { + m.ProviderConfig = &runtime.RawExtension{} + } + if err := m.ProviderConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen 
uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Version = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ShootNetworks) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ShootNetworks: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ShootNetworks: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pods", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Pods = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Services", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Services = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ShootSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx 
+ var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ShootSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ShootSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addons", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Addons == nil { + m.Addons = &Addons{} + } + if err := m.Addons.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CloudProfileName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CloudProfileName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DNS", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DNS == nil { + m.DNS = &DNS{} + } + if err := m.DNS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Extensions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Extensions = append(m.Extensions, Extension{}) + if err := m.Extensions[len(m.Extensions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hibernation", 
wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Hibernation == nil { + m.Hibernation = &Hibernation{} + } + if err := m.Hibernation.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kubernetes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Kubernetes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Networking", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Networking.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Maintenance", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Maintenance == nil { + m.Maintenance = &Maintenance{} + } + if err := m.Maintenance.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Monitoring", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Monitoring == nil { + m.Monitoring = &Monitoring{} + } + if err := m.Monitoring.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Provider", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Provider.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Purpose", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := ShootPurpose(dAtA[iNdEx:postIndex]) + m.Purpose = &s + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Region", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Region = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretBindingName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SecretBindingName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SeedName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.SeedName = &s + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
SeedSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SeedSelector == nil { + m.SeedSelector = &SeedSelector{} + } + if err := m.SeedSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, NamedResourceReference{}) + if err := m.Resources[len(m.Resources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 17: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tolerations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tolerations = append(m.Tolerations, Toleration{}) + if err := m.Tolerations[len(m.Tolerations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ShootState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ShootState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ShootState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ShootStateList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ShootStateList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ShootStateList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ShootState{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + 
iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ShootStateSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ShootStateSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ShootStateSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Gardener", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Gardener = append(m.Gardener, GardenerResourceData{}) + if err := m.Gardener[len(m.Gardener)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Extensions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Extensions = append(m.Extensions, ExtensionResourceState{}) + if err := m.Extensions[len(m.Extensions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, ResourceData{}) + if err := m.Resources[len(m.Resources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return 
ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ShootStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ShootStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ShootStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, Condition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Constraints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Constraints = append(m.Constraints, Condition{}) + if err := m.Constraints[len(m.Constraints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Gardener", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Gardener.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IsHibernated", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IsHibernated = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
LastOperation", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LastOperation == nil { + m.LastOperation = &LastOperation{} + } + if err := m.LastOperation.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastError", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LastError == nil { + m.LastError = &LastError{} + } + if err := m.LastError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastErrors", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LastErrors = append(m.LastErrors, LastError{}) + if err := m.LastErrors[len(m.LastErrors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ObservedGeneration |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RetryCycleStartTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RetryCycleStartTime == nil { + m.RetryCycleStartTime = &v11.Time{} + } + if err := m.RetryCycleStartTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Seed", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Seed = &s + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TechnicalID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TechnicalID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterIdentity", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.ClusterIdentity = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Toleration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Toleration: wiretype end group for non-group") 
+ } + if fieldNum <= 0 { + return fmt.Errorf("proto: Toleration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Value = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VerticalPodAutoscaler) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VerticalPodAutoscaler: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VerticalPodAutoscaler: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Enabled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Enabled = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EvictAfterOOMThreshold", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { 
+ return io.ErrUnexpectedEOF + } + if m.EvictAfterOOMThreshold == nil { + m.EvictAfterOOMThreshold = &v11.Duration{} + } + if err := m.EvictAfterOOMThreshold.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EvictionRateBurst", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.EvictionRateBurst = &v + case 4: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field EvictionRateLimit", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.EvictionRateLimit = &v2 + case 5: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field EvictionTolerance", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.EvictionTolerance = &v2 + case 6: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field RecommendationMarginFraction", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.RecommendationMarginFraction = &v2 + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdaterInterval", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.UpdaterInterval == nil { + m.UpdaterInterval = &v11.Duration{} + } + if err := m.UpdaterInterval.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RecommenderInterval", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RecommenderInterval == nil { + m.RecommenderInterval = &v11.Duration{} + } + if err := m.RecommenderInterval.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + 
} + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Volume) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Volume: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Volume: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Name = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Type = &s + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeSize", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeSize = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Encrypted", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Encrypted = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if 
iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeType) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeType: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeType: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Class", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Class = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Usable", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Usable = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WatchCacheSizes) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WatchCacheSizes: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WatchCacheSizes: illegal tag %d (wire type %d)", fieldNum, 
wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Default", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Default = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, ResourceWatchCacheSize{}) + if err := m.Resources[len(m.Resources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Worker) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Worker: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Worker: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Annotations == nil { + m.Annotations = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } 
+ } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Annotations[mapkey] = mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CABundle", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.CABundle = &s + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CRI", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CRI == nil { + m.CRI = &CRI{} + } + if err := m.CRI.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kubernetes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Kubernetes == nil { + m.Kubernetes = &WorkerKubernetes{} + } + if err := 
m.Kubernetes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Machine", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Machine.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Maximum", wireType) + } + m.Maximum = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Maximum |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Minimum", wireType) + } + m.Minimum = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Minimum |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxSurge", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MaxSurge == nil { + m.MaxSurge = &intstr.IntOrString{} + } + if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MaxUnavailable == nil { + m.MaxUnavailable = &intstr.IntOrString{} + } + if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProviderConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ProviderConfig == nil { + m.ProviderConfig 
= &runtime.RawExtension{} + } + if err := m.ProviderConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Taints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Taints = append(m.Taints, v1.Taint{}) + if err := m.Taints[len(m.Taints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Volume", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Volume == nil { + m.Volume = &Volume{} + } + if err := m.Volume.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DataVolumes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DataVolumes = append(m.DataVolumes, DataVolume{}) + if err := m.DataVolumes[len(m.DataVolumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KubeletDataVolumeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.KubeletDataVolumeName = &s + iNdEx = postIndex + case 17: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Zones", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Zones = append(m.Zones, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 18: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SystemComponents", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SystemComponents == nil { + m.SystemComponents = &WorkerSystemComponents{} + } + if err := m.SystemComponents.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 19: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MachineControllerManagerSettings", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MachineControllerManagerSettings == nil { + m.MachineControllerManagerSettings = &MachineControllerManagerSettings{} + } + if err := m.MachineControllerManagerSettings.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WorkerKubernetes) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WorkerKubernetes: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WorkerKubernetes: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kubelet", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if 
m.Kubelet == nil { + m.Kubelet = &KubeletConfig{} + } + if err := m.Kubelet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WorkerSystemComponents) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WorkerSystemComponents: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WorkerSystemComponents: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Allow", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Allow = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = 
fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/generated.proto b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/generated.proto new file mode 100644 index 000000000..b50c3c854 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/generated.proto @@ -0,0 +1,2344 @@ +/* +Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package github.com.gardener.gardener.pkg.apis.core.v1alpha1; + +import "github.com/gardener/gardener/pkg/apis/core/v1beta1/generated.proto"; +import "k8s.io/api/autoscaling/v1/generated.proto"; +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/api/rbac/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/api/resource/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1alpha1"; + +// Addon allows enabling or disabling a specific addon and is used to derive from. +message Addon { + // Enabled indicates whether the addon is enabled or not. + optional bool enabled = 1; +} + +// Addons is a collection of configuration for specific addons which are managed by the Gardener. +message Addons { + // KubernetesDashboard holds configuration settings for the kubernetes dashboard addon. + // +optional + optional KubernetesDashboard kubernetesDashboard = 1; + + // NginxIngress holds configuration settings for the nginx-ingress addon. + // +optional + optional NginxIngress nginxIngress = 2; +} + +// AdmissionPlugin contains information about a specific admission plugin and its corresponding configuration. +message AdmissionPlugin { + // Name is the name of the plugin. + optional string name = 1; + + // Config is the configuration of the plugin. + // +optional + optional k8s.io.apimachinery.pkg.runtime.RawExtension config = 2; +} + +// Alerting contains information about how alerting will be done (i.e. who will receive alerts and how). +message Alerting { + // MonitoringEmailReceivers is a list of recipients for alerts + // +optional + repeated string emailReceivers = 1; +} + +// AuditConfig contains settings for audit of the api server +message AuditConfig { + // AuditPolicy contains configuration settings for audit policy of the kube-apiserver. 
+ // +optional + optional AuditPolicy auditPolicy = 1; +} + +// AuditPolicy contains audit policy for kube-apiserver +message AuditPolicy { + // ConfigMapRef is a reference to a ConfigMap object in the same namespace, + // which contains the audit policy for the kube-apiserver. + // +optional + optional k8s.io.api.core.v1.ObjectReference configMapRef = 1; +} + +// AvailabilityZone is an availability zone. +message AvailabilityZone { + // Name is an an availability zone name. + optional string name = 1; + + // UnavailableMachineTypes is a list of machine type names that are not availability in this zone. + // +optional + repeated string unavailableMachineTypes = 2; + + // UnavailableVolumeTypes is a list of volume type names that are not availability in this zone. + // +optional + repeated string unavailableVolumeTypes = 3; +} + +// BackupBucket holds details about backup bucket +message BackupBucket { + // Standard object metadata. + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the Backup Bucket. + optional BackupBucketSpec spec = 2; + + // Most recently observed status of the Backup Bucket. + optional BackupBucketStatus status = 3; +} + +// BackupBucketList is a list of BackupBucket objects. +message BackupBucketList { + // Standard list object metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of BackupBucket. + repeated BackupBucket items = 2; +} + +// BackupBucketProvider holds the details of cloud provider of the object store. +message BackupBucketProvider { + // Type is the type of provider. + optional string type = 1; + + // Region is the region of the bucket. + optional string region = 2; +} + +// BackupBucketSpec is the specification of a Backup Bucket. +message BackupBucketSpec { + // Provider hold the details of cloud provider of the object store. + optional BackupBucketProvider provider = 1; + + // ProviderConfig is the configuration passed to BackupBucket resource. + // +optional + optional k8s.io.apimachinery.pkg.runtime.RawExtension providerConfig = 2; + + // SecretRef is a reference to a secret that contains the credentials to access object store. + optional k8s.io.api.core.v1.SecretReference secretRef = 3; + + // Seed holds the name of the seed allocated to BackupBucket for running controller. + // +optional + optional string seed = 4; +} + +// BackupBucketStatus holds the most recently observed status of the Backup Bucket. +message BackupBucketStatus { + // ProviderStatus is the configuration passed to BackupBucket resource. + // +optional + optional k8s.io.apimachinery.pkg.runtime.RawExtension providerStatus = 1; + + // LastOperation holds information about the last operation on the BackupBucket. + // +optional + optional LastOperation lastOperation = 2; + + // LastError holds information about the last occurred error during an operation. + // +optional + optional LastError lastError = 3; + + // ObservedGeneration is the most recent generation observed for this BackupBucket. It corresponds to the + // BackupBucket's generation, which is updated on mutation by the API Server. + // +optional + optional int64 observedGeneration = 4; + + // GeneratedSecretRef is reference to the secret generated by backup bucket, which + // will have object store specific credentials. + // +optional + optional k8s.io.api.core.v1.SecretReference generatedSecretRef = 5; +} + +// BackupEntry holds details about shoot backup. 
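Note how the proto2 declarations map onto the Go types the Unmarshal code earlier populates: `optional` scalars surface as pointer fields (assigned via `&s`), `repeated` fields become slices, and message-typed fields become struct pointers, which is what lets "unset" be told apart from the zero value. A minimal sketch under that assumption, with Go field names taken from the proto declarations above (Provider, SecretRef, Seed; v1alpha1 refers to this vendored package, corev1 to k8s.io/api/core/v1):

    seed := "my-seed" // optional string seed = 4 -> *string in Go
    spec := v1alpha1.BackupBucketSpec{
        Provider:  v1alpha1.BackupBucketProvider{Type: "aws", Region: "eu-west-1"},
        SecretRef: corev1.SecretReference{Name: "backup-credentials", Namespace: "garden"},
        Seed:      &seed,
    }
    _ = spec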
+message BackupEntry { + // Standard object metadata. + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec contains the specification of the Backup Entry. + // +optional + optional BackupEntrySpec spec = 2; + + // Status contains the most recently observed status of the Backup Entry. + // +optional + optional BackupEntryStatus status = 3; +} + +// BackupEntryList is a list of BackupEntry objects. +message BackupEntryList { + // Standard list object metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of BackupEntry. + repeated BackupEntry items = 2; +} + +// BackupEntrySpec is the specification of a Backup Entry. +message BackupEntrySpec { + // BucketName is the name of backup bucket for this Backup Entry. + optional string bucketName = 1; + + // Seed holds the name of the seed allocated to BackupEntry for running controller. + // +optional + optional string seed = 2; +} + +// BackupEntryStatus holds the most recently observed status of the Backup Entry. +message BackupEntryStatus { + // LastOperation holds information about the last operation on the BackupEntry. + // +optional + optional LastOperation lastOperation = 1; + + // LastError holds information about the last occurred error during an operation. + // +optional + optional LastError lastError = 2; + + // ObservedGeneration is the most recent generation observed for this BackupEntry. It corresponds to the + // BackupEntry's generation, which is updated on mutation by the API Server. + // +optional + optional int64 observedGeneration = 3; +} + +// CRI contains information about the Container Runtimes. +message CRI { + // The name of the CRI library + optional string name = 1; + + // ContainerRuntimes is the list of the required container runtimes supported for a worker pool. + // +optional + repeated ContainerRuntime containerRuntimes = 2; +} + +// CloudInfo contains information about the cloud +message CloudInfo { + // Type is the cloud type + optional string type = 1; + + // Region is the cloud region + optional string region = 2; +} + +// CloudProfile represents certain properties about a provider environment. +message CloudProfile { + // Standard object metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec defines the provider environment properties. + // +optional + optional CloudProfileSpec spec = 2; +} + +// CloudProfileList is a collection of CloudProfiles. +message CloudProfileList { + // Standard list object metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of CloudProfiles. + repeated CloudProfile items = 2; +} + +// CloudProfileSpec is the specification of a CloudProfile. +// It must contain exactly one of its defined keys. +message CloudProfileSpec { + // CABundle is a certificate bundle which will be installed onto every host machine of shoot cluster targeting this profile. + // +optional + optional string caBundle = 1; + + // Kubernetes contains constraints regarding allowed values of the 'kubernetes' block in the Shoot specification. + optional KubernetesSettings kubernetes = 2; + + // MachineImages contains constraints regarding allowed values for machine images in the Shoot specification. 
+ // +patchMergeKey=name + // +patchStrategy=merge + repeated MachineImage machineImages = 3; + + // MachineTypes contains constraints regarding allowed values for machine types in the 'workers' block in the Shoot specification. + // +patchMergeKey=name + // +patchStrategy=merge + repeated MachineType machineTypes = 4; + + // ProviderConfig contains provider-specific configuration for the profile. + // +optional + optional k8s.io.apimachinery.pkg.runtime.RawExtension providerConfig = 5; + + // Regions contains constraints regarding allowed values for regions and zones. + // +patchMergeKey=name + // +patchStrategy=merge + repeated Region regions = 6; + + // SeedSelector contains an optional list of labels on `Seed` resources that marks those seeds whose shoots may use this provider profile. + // An empty list means that all seeds of the same provider type are supported. + // This is useful for environments that are of the same type (like openstack) but may have different "instances"/landscapes. + // Optionally a list of possible providers can be added to enable cross-provider scheduling. By default, the provider + // type of the seed must match the shoot's provider. + // +optional + optional SeedSelector seedSelector = 7; + + // Type is the name of the provider. + optional string type = 8; + + // VolumeTypes contains constraints regarding allowed values for volume types in the 'workers' block in the Shoot specification. + // +patchMergeKey=name + // +patchStrategy=merge + // +optional + repeated VolumeType volumeTypes = 9; +} + +// ClusterAutoscaler contains the configuration flags for the Kubernetes cluster autoscaler. +message ClusterAutoscaler { + // ScaleDownDelayAfterAdd defines how long after scale up that scale down evaluation resumes (default: 1 hour). + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration scaleDownDelayAfterAdd = 1; + + // ScaleDownDelayAfterDelete how long after node deletion that scale down evaluation resumes, defaults to scanInterval (defaults to ScanInterval). + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration scaleDownDelayAfterDelete = 2; + + // ScaleDownDelayAfterFailure how long after scale down failure that scale down evaluation resumes (default: 3 mins). + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration scaleDownDelayAfterFailure = 3; + + // ScaleDownUnneededTime defines how long a node should be unneeded before it is eligible for scale down (default: 30 mins). + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration scaleDownUnneededTime = 4; + + // ScaleDownUtilizationThreshold defines the threshold in % under which a node is being removed + // +optional + optional double scaleDownUtilizationThreshold = 5; + + // ScanInterval how often cluster is reevaluated for scale up or down (default: 10 secs). + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration scanInterval = 6; +} + +// ClusterInfo contains information about the Plant cluster +message ClusterInfo { + // Cloud describes the cloud information + optional CloudInfo cloud = 1; + + // Kubernetes describes kubernetes meta information (e.g., version) + optional KubernetesInfo kubernetes = 2; +} + +// Condition holds the information about the state of a resource. +message Condition { + // Type of the Shoot condition. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // Last time the condition transitioned from one status to another. 
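The ClusterAutoscaler settings above are all plain durations plus one ratio; on the Go side the durations are wrapped in metav1.Duration pointers and the threshold is a *float64, so leaving a field nil keeps the documented default. A hedged sketch (field names assumed to follow the proto names; metav1 is k8s.io/apimachinery/pkg/apis/meta/v1):

    threshold := 0.5
    ca := v1alpha1.ClusterAutoscaler{
        ScaleDownDelayAfterAdd:        &metav1.Duration{Duration: time.Hour},        // documented default: 1 hour
        ScaleDownUnneededTime:         &metav1.Duration{Duration: 30 * time.Minute}, // documented default: 30 mins
        ScanInterval:                  &metav1.Duration{Duration: 10 * time.Second}, // documented default: 10 secs
        ScaleDownUtilizationThreshold: &threshold,
    }
    _ = ca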
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // Last time the condition was updated. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 4; + + // The reason for the condition's last transition. + optional string reason = 5; + + // A human readable message indicating details about the transition. + optional string message = 6; + + // Well-defined error codes in case the condition reports a problem. + // +optional + repeated string codes = 7; +} + +// ContainerRuntime contains information about worker's available container runtime +message ContainerRuntime { + // Type is the type of the Container Runtime. + optional string name = 1; + + // ProviderConfig is the configuration passed to the ContainerRuntime resource. + // +optional + optional k8s.io.apimachinery.pkg.runtime.RawExtension providerConfig = 2; +} + +// ControllerDeployment contains information for how this controller is deployed. +message ControllerDeployment { + // Type is the deployment type. + optional string type = 1; + + // ProviderConfig contains type-specific configuration. + // +optional + optional k8s.io.apimachinery.pkg.runtime.RawExtension providerConfig = 2; + + // Policy controls how the controller is deployed. It defaults to 'OnDemand'. + // +optional + optional string policy = 3; + + // SeedSelector contains an optional label selector for seeds. Only if the labels match then this controller will be + // considered for a deployment. + // An empty list means that all seeds are selected. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector seedSelector = 4; +} + +// ControllerInstallation represents an installation request for an external controller. +message ControllerInstallation { + // Standard object metadata. + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec contains the specification of this installation. + optional ControllerInstallationSpec spec = 2; + + // Status contains the status of this installation. + optional ControllerInstallationStatus status = 3; +} + +// ControllerInstallationList is a collection of ControllerInstallations. +message ControllerInstallationList { + // Standard list object metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of ControllerInstallations. + repeated ControllerInstallation items = 2; +} + +// ControllerInstallationSpec is the specification of a ControllerInstallation. +message ControllerInstallationSpec { + // RegistrationRef is used to reference a ControllerRegistration resources. + optional k8s.io.api.core.v1.ObjectReference registrationRef = 1; + + // SeedRef is used to reference a Seed resources. + optional k8s.io.api.core.v1.ObjectReference seedRef = 2; +} + +// ControllerInstallationStatus is the status of a ControllerInstallation. +message ControllerInstallationStatus { + // Conditions represents the latest available observations of a ControllerInstallations's current state. + // +patchMergeKey=type + // +patchStrategy=merge + // +optional + repeated Condition conditions = 1; + + // ProviderStatus contains type-specific status. + // +optional + optional k8s.io.apimachinery.pkg.runtime.RawExtension providerStatus = 2; +} + +// ControllerRegistration represents a registration of an external controller. +message ControllerRegistration { + // Standard object metadata. 
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec contains the specification of this registration. + optional ControllerRegistrationSpec spec = 2; +} + +// ControllerRegistrationList is a collection of ControllerRegistrations. +message ControllerRegistrationList { + // Standard list object metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of ControllerRegistrations. + repeated ControllerRegistration items = 2; +} + +// ControllerRegistrationSpec is the specification of a ControllerRegistration. +message ControllerRegistrationSpec { + // Resources is a list of combinations of kinds (DNSProvider, Infrastructure, Generic, ...) and their actual types + // (aws-route53, gcp, auditlog, ...). + // +optional + repeated ControllerResource resources = 1; + + // Deployment contains information for how this controller is deployed. + // +optional + optional ControllerDeployment deployment = 2; +} + +// ControllerResource is a combination of a kind (DNSProvider, Infrastructure, Generic, ...) and the actual type for this +// kind (aws-route53, gcp, auditlog, ...). +message ControllerResource { + // Kind is the resource kind, for example "OperatingSystemConfig". + optional string kind = 1; + + // Type is the resource type, for example "coreos" or "ubuntu". + optional string type = 2; + + // GloballyEnabled determines if this ControllerResource is required by all Shoot clusters. + // +optional + optional bool globallyEnabled = 3; + + // ReconcileTimeout defines how long Gardener should wait for the resource reconciliation. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration reconcileTimeout = 4; + + // Primary determines if the controller backed by this ControllerRegistration is responsible for the extension + // resource's lifecycle. This field defaults to true. There must be exactly one primary controller for this kind/type + // combination. + // +optional + optional bool primary = 5; +} + +// DNS holds information about the provider, the hosted zone id and the domain. +message DNS { + // Domain is the external available domain of the Shoot cluster. This domain will be written into the + // kubeconfig that is handed out to end-users. Once set it is immutable. + // +optional + optional string domain = 1; + + // Providers is a list of DNS providers that shall be enabled for this shoot cluster. Only relevant if + // not a default domain is used. + // +patchMergeKey=type + // +patchStrategy=merge + // +optional + repeated DNSProvider providers = 2; +} + +message DNSIncludeExclude { + // Include is a list of resources that shall be included. + // +optional + repeated string include = 1; + + // Exclude is a list of resources that shall be excluded. + // +optional + repeated string exclude = 2; +} + +// DNSProvider contains information about a DNS provider. +message DNSProvider { + // Domains contains information about which domains shall be included/excluded for this provider. + // +optional + optional DNSIncludeExclude domains = 1; + + // Primary indicates that this DNSProvider is used for shoot related domains. + // +optional + optional bool primary = 2; + + // SecretName is a name of a secret containing credentials for the stated domain and the + // provider. When not specified, the Gardener will use the cloud provider credentials referenced + // by the Shoot and try to find respective credentials there (primary provider only). 
Specifying this field may override + // this behavior, i.e. forcing the Gardener to only look into the given secret. + // +optional + optional string secretName = 3; + + // Type is the DNS provider type. + // +optional + optional string type = 4; + + // Zones contains information about which hosted zones shall be included/excluded for this provider. + // +optional + optional DNSIncludeExclude zones = 5; +} + +// DataVolume contains information about a data volume. +message DataVolume { + // Name of the volume to make it referencable. + optional string name = 1; + + // Type is the type of the volume. + // +optional + optional string type = 2; + + // VolumeSize is the size of the volume. + optional string size = 3; + + // Encrypted determines if the volume should be encrypted. + // +optional + optional bool encrypted = 4; +} + +// Endpoint is an endpoint for monitoring, logging and other services around the plant. +message Endpoint { + // Name is the name of the endpoint + optional string name = 1; + + // URL is the url of the endpoint + optional string url = 2; + + // Purpose is the purpose of the endpoint + optional string purpose = 3; +} + +// ExpirableVersion contains a version and an expiration date. +message ExpirableVersion { + // Version is the version identifier. + optional string version = 1; + + // ExpirationDate defines the time at which this version expires. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time expirationDate = 2; + + // Classification defines the state of a version (preview, supported, deprecated) + // +optional + optional string classification = 3; +} + +// Extension contains type and provider information for Shoot extensions. +message Extension { + // Type is the type of the extension resource. + optional string type = 1; + + // ProviderConfig is the configuration passed to extension resource. + // +optional + optional k8s.io.apimachinery.pkg.runtime.RawExtension providerConfig = 2; + + // Disabled allows to disable extensions that were marked as 'globally enabled' by Gardener administrators. + // +optional + optional bool disabled = 3; +} + +// ExtensionResourceState contains the kind of the extension custom resource and its last observed state in the Shoot's +// namespace on the Seed cluster. +message ExtensionResourceState { + // Kind (type) of the extension custom resource + optional string kind = 1; + + // Name of the extension custom resource + // +optional + optional string name = 2; + + // Purpose of the extension custom resource + // +optional + optional string purpose = 3; + + // State of the extension resource + // +optional + optional k8s.io.apimachinery.pkg.runtime.RawExtension state = 4; + + // Resources holds a list of named resource references that can be referred to in the state by their names. + // +optional + repeated github.com.gardener.gardener.pkg.apis.core.v1beta1.NamedResourceReference resources = 5; +} + +// Gardener holds the information about the Gardener version that operated a resource. +message Gardener { + // ID is the Docker container id of the Gardener which last acted on a resource. + optional string id = 1; + + // Name is the hostname (pod name) of the Gardener which last acted on a resource. + optional string name = 2; + + // Version is the version of the Gardener which last acted on a resource. + optional string version = 3; +} + +// GardenerResourceData holds the data which is used to generate resources, deployed in the Shoot's control plane. 
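The DataVolume message above mirrors the Volume message whose generated Unmarshal opens this hunk; that Unmarshal code shows the resulting Go shape directly: optional name, type and encrypted become pointers, while size stays a plain string (VolumeSize). A minimal sketch of a worker-pool root volume under exactly those field names:

    name := "root"
    volType := "gp2"
    encrypted := true
    v := v1alpha1.Volume{
        Name:       &name,      // optional string name = 1
        Type:       &volType,   // optional string type = 2
        VolumeSize: "50Gi",     // optional string size = 3 (plain string in Go)
        Encrypted:  &encrypted, // optional bool encrypted = 4
    }
    _ = v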
+message GardenerResourceData { + // Name of the object required to generate resources + optional string name = 1; + + // Type of the object + optional string type = 2; + + // Data contains the payload required to generate resources + optional k8s.io.apimachinery.pkg.runtime.RawExtension data = 3; +} + +// Hibernation contains information whether the Shoot is suspended or not. +message Hibernation { + // Enabled specifies whether the Shoot needs to be hibernated or not. If it is true, the Shoot's desired state is to be hibernated. + // If it is false or nil, the Shoot's desired state is to be awaken. + // +optional + optional bool enabled = 1; + + // Schedules determine the hibernation schedules. + // +optional + repeated HibernationSchedule schedules = 2; +} + +// HibernationSchedule determines the hibernation schedule of a Shoot. +// A Shoot will be regularly hibernated at each start time and will be woken up at each end time. +// Start or End can be omitted, though at least one of each has to be specified. +message HibernationSchedule { + // Start is a Cron spec at which time a Shoot will be hibernated. + // +optional + optional string start = 1; + + // End is a Cron spec at which time a Shoot will be woken up. + // +optional + optional string end = 2; + + // Location is the time location in which both start and and shall be evaluated. + // +optional + optional string location = 3; +} + +// HorizontalPodAutoscalerConfig contains horizontal pod autoscaler configuration settings for the kube-controller-manager. +// Note: Descriptions were taken from the Kubernetes documentation. +message HorizontalPodAutoscalerConfig { + // The period after which a ready pod transition is considered to be the first. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration cpuInitializationPeriod = 1; + + // The period since last downscale, before another downscale can be performed in horizontal pod autoscaler. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration downscaleDelay = 2; + + // The configurable window at which the controller will choose the highest recommendation for autoscaling. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration downscaleStabilization = 3; + + // The configurable period at which the horizontal pod autoscaler considers a Pod “not yet ready” given that it’s unready and it has transitioned to unready during that time. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration initialReadinessDelay = 4; + + // The period for syncing the number of pods in horizontal pod autoscaler. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration syncPeriod = 5; + + // The minimum change (from 1.0) in the desired-to-actual metrics ratio for the horizontal pod autoscaler to consider scaling. + // +optional + optional double tolerance = 6; + + // The period since last upscale, before another upscale can be performed in horizontal pod autoscaler. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration upscaleDelay = 7; +} + +// Ingress configures the Ingress specific settings of the Seed cluster. +message Ingress { + // Domain specifies the IngressDomain of the Seed cluster pointing to the ingress controller endpoint. It will be used + // to construct ingress URLs for system applications running in Shoot clusters. Once set this field is immutable. 
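A HibernationSchedule combines two cron specs with an optional time zone, all optional strings and therefore pointers on the Go side; a Shoot is hibernated at each matching start time and woken up at each matching end time. A hedged sketch of a weekday schedule (field names assumed from the proto declarations above):

    start := "00 19 * * 1,2,3,4,5" // hibernate at 19:00 on weekdays
    end := "00 07 * * 1,2,3,4,5"   // wake up at 07:00 on weekdays
    loc := "Europe/Berlin"
    hib := v1alpha1.Hibernation{
        Schedules: []v1alpha1.HibernationSchedule{
            {Start: &start, End: &end, Location: &loc},
        },
    }
    _ = hib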
+ optional string domain = 1; + + // Controller configures a Gardener managed Ingress Controller listening on the ingressDomain + optional IngressController controller = 2; +} + +// IngressController enables a Gardener managed Ingress Controller listening on the ingressDomain +message IngressController { + // Kind defines which kind of IngressController to use, for example `nginx` + optional string kind = 1; + + // ProviderConfig specifies infrastructure specific configuration for the ingressController + // +optional + optional k8s.io.apimachinery.pkg.runtime.RawExtension providerConfig = 2; +} + +// KubeAPIServerConfig contains configuration settings for the kube-apiserver. +message KubeAPIServerConfig { + optional KubernetesConfig kubernetesConfig = 1; + + // AdmissionPlugins contains the list of user-defined admission plugins (additional to those managed by Gardener), and, if desired, the corresponding + // configuration. + // +patchMergeKey=name + // +patchStrategy=merge + // +optional + repeated AdmissionPlugin admissionPlugins = 2; + + // APIAudiences are the identifiers of the API. The service account token authenticator will + // validate that tokens used against the API are bound to at least one of these audiences. + // Defaults to ["kubernetes"]. + // +optional + repeated string apiAudiences = 3; + + // AuditConfig contains configuration settings for the audit of the kube-apiserver. + // +optional + optional AuditConfig auditConfig = 4; + + // EnableBasicAuthentication defines whether basic authentication should be enabled for this cluster or not. + // +optional + optional bool enableBasicAuthentication = 5; + + // OIDCConfig contains configuration settings for the OIDC provider. + // +optional + optional OIDCConfig oidcConfig = 6; + + // RuntimeConfig contains information about enabled or disabled APIs. + // +optional + map runtimeConfig = 7; + + // ServiceAccountConfig contains configuration settings for the service account handling + // of the kube-apiserver. + // +optional + optional ServiceAccountConfig serviceAccountConfig = 8; + + // WatchCacheSizes contains configuration of the API server's watch cache sizes. + // Configuring these flags might be useful for large-scale Shoot clusters with a lot of parallel update requests + // and a lot of watching controllers (e.g. large shooted Seed clusters). When the API server's watch cache's + // capacity is too small to cope with the amount of update requests and watchers for a particular resource, it + // might happen that controller watches are permanently stopped with `too old resource version` errors. + // Starting from kubernetes v1.19, the API server's watch cache size is adapted dynamically and setting the watch + // cache size flags will have no effect, except when setting it to 0 (which disables the watch cache). + // +optional + optional WatchCacheSizes watchCacheSizes = 9; + + // Requests contains configuration for request-specific settings for the kube-apiserver. + // +optional + optional KubeAPIServerRequests requests = 10; +} + +// KubeAPIServerRequests contains configuration for request-specific settings for the kube-apiserver. +message KubeAPIServerRequests { + // MaxNonMutatingInflight is the maximum number of non-mutating requests in flight at a given time. When the server + // exceeds this, it rejects requests. + // +optional + optional int32 maxNonMutatingInflight = 1; + + // MaxMutatingInflight is the maximum number of mutating requests in flight at a given time. 
When the server + // exceeds this, it rejects requests. + // +optional + optional int32 maxMutatingInflight = 2; +} + +// KubeControllerManagerConfig contains configuration settings for the kube-controller-manager. +message KubeControllerManagerConfig { + optional KubernetesConfig kubernetesConfig = 1; + + // HorizontalPodAutoscalerConfig contains horizontal pod autoscaler configuration settings for the kube-controller-manager. + // +optional + optional HorizontalPodAutoscalerConfig horizontalPodAutoscaler = 2; + + // NodeCIDRMaskSize defines the mask size for node cidr in cluster (default is 24) + // +optional + optional int32 nodeCIDRMaskSize = 3; + + // PodEvictionTimeout defines the grace period for deleting pods on failed nodes. Defaults to 2m. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration podEvictionTimeout = 4; +} + +// KubeProxyConfig contains configuration settings for the kube-proxy. +message KubeProxyConfig { + optional KubernetesConfig kubernetesConfig = 1; + + // Mode specifies which proxy mode to use. + // defaults to IPTables. + // +optional + optional string mode = 2; +} + +// KubeSchedulerConfig contains configuration settings for the kube-scheduler. +message KubeSchedulerConfig { + optional KubernetesConfig kubernetesConfig = 1; + + // KubeMaxPDVols allows to configure the `KUBE_MAX_PD_VOLS` environment variable for the kube-scheduler. + // Please find more information here: https://kubernetes.io/docs/concepts/storage/storage-limits/#custom-limits + // Note that using this field is considered alpha-/experimental-level and is on your own risk. You should be aware + // of all the side-effects and consequences when changing it. + // +optional + optional string kubeMaxPDVols = 2; +} + +// KubeletConfig contains configuration settings for the kubelet. +message KubeletConfig { + optional KubernetesConfig kubernetesConfig = 1; + + // CPUCFSQuota allows you to disable/enable CPU throttling for Pods. + // +optional + optional bool cpuCFSQuota = 2; + + // CPUManagerPolicy allows to set alternative CPU management policies (default: none). + // +optional + optional string cpuManagerPolicy = 3; + + // EvictionHard describes a set of eviction thresholds (e.g. memory.available<1Gi) that if met would trigger a Pod eviction. + // +optional + // Default: + // memory.available: "100Mi/1Gi/5%" + // nodefs.available: "5%" + // nodefs.inodesFree: "5%" + // imagefs.available: "5%" + // imagefs.inodesFree: "5%" + optional KubeletConfigEviction evictionHard = 4; + + // EvictionMaxPodGracePeriod describes the maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met. + // +optional + // Default: 90 + optional int32 evictionMaxPodGracePeriod = 5; + + // EvictionMinimumReclaim configures the amount of resources below the configured eviction threshold that the kubelet attempts to reclaim whenever the kubelet observes resource pressure. + // +optional + // Default: 0 for each resource + optional KubeletConfigEvictionMinimumReclaim evictionMinimumReclaim = 6; + + // EvictionPressureTransitionPeriod is the duration for which the kubelet has to wait before transitioning out of an eviction pressure condition. + // +optional + // Default: 4m0s + optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration evictionPressureTransitionPeriod = 7; + + // EvictionSoft describes a set of eviction thresholds (e.g. memory.available<1.5Gi) that if met over a corresponding grace period would trigger a Pod eviction. 
+ // +optional + // Default: + // memory.available: "200Mi/1.5Gi/10%" + // nodefs.available: "10%" + // nodefs.inodesFree: "10%" + // imagefs.available: "10%" + // imagefs.inodesFree: "10%" + optional KubeletConfigEviction evictionSoft = 8; + + // EvictionSoftGracePeriod describes a set of eviction grace periods (e.g. memory.available=1m30s) that correspond to how long a soft eviction threshold must hold before triggering a Pod eviction. + // +optional + // Default: + // memory.available: 1m30s + // nodefs.available: 1m30s + // nodefs.inodesFree: 1m30s + // imagefs.available: 1m30s + // imagefs.inodesFree: 1m30s + optional KubeletConfigEvictionSoftGracePeriod evictionSoftGracePeriod = 9; + + // MaxPods is the maximum number of Pods that are allowed by the Kubelet. + // +optional + // Default: 110 + optional int32 maxPods = 10; + + // PodPIDsLimit is the maximum number of process IDs per pod allowed by the kubelet. + // +optional + optional int64 podPidsLimit = 11; + + // ImagePullProgressDeadline describes the time limit under which if no pulling progress is made, the image pulling will be cancelled. + // +optional + // Default: 1m + optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration imagePullProgressDeadline = 12; + + // FailSwapOn makes the Kubelet fail to start if swap is enabled on the node. (default true). + // +optional + optional bool failSwapOn = 13; + + // KubeReserved is the configuration for resources reserved for kubernetes node components (mainly kubelet and container runtime). + // When updating these values, be aware that cgroup resizes may not succeed on active worker nodes. Look for the NodeAllocatableEnforced event to determine if the configuration was applied. + // +optional + // Default: cpu=80m,memory=1Gi,pid=20k + optional KubeletConfigReserved kubeReserved = 14; + + // SystemReserved is the configuration for resources reserved for system processes not managed by kubernetes (e.g. journald). + // When updating these values, be aware that cgroup resizes may not succeed on active worker nodes. Look for the NodeAllocatableEnforced event to determine if the configuration was applied. + // +optional + optional KubeletConfigReserved systemReserved = 15; +} + +// KubeletConfigEviction contains kubelet eviction thresholds supporting either a resource.Quantity or a percentage based value. +message KubeletConfigEviction { + // MemoryAvailable is the threshold for the free memory on the host server. + // +optional + optional string memoryAvailable = 1; + + // ImageFSAvailable is the threshold for the free disk space in the imagefs filesystem (docker images and container writable layers). + // +optional + optional string imageFSAvailable = 2; + + // ImageFSInodesFree is the threshold for the available inodes in the imagefs filesystem. + // +optional + optional string imageFSInodesFree = 3; + + // NodeFSAvailable is the threshold for the free disk space in the nodefs filesystem (docker volumes, logs, etc). + // +optional + optional string nodeFSAvailable = 4; + + // NodeFSInodesFree is the threshold for the available inodes in the nodefs filesystem. + // +optional + optional string nodeFSInodesFree = 5; +} + +// KubeletConfigEvictionMinimumReclaim contains configuration for the kubelet eviction minimum reclaim. +message KubeletConfigEvictionMinimumReclaim { + // MemoryAvailable is the threshold for the memory reclaim on the host server. 
+ // +optional + optional k8s.io.apimachinery.pkg.api.resource.Quantity memoryAvailable = 1; + + // ImageFSAvailable is the threshold for the disk space reclaim in the imagefs filesystem (docker images and container writable layers). + // +optional + optional k8s.io.apimachinery.pkg.api.resource.Quantity imageFSAvailable = 2; + + // ImageFSInodesFree is the threshold for the inodes reclaim in the imagefs filesystem. + // +optional + optional k8s.io.apimachinery.pkg.api.resource.Quantity imageFSInodesFree = 3; + + // NodeFSAvailable is the threshold for the disk space reclaim in the nodefs filesystem (docker volumes, logs, etc). + // +optional + optional k8s.io.apimachinery.pkg.api.resource.Quantity nodeFSAvailable = 4; + + // NodeFSInodesFree is the threshold for the inodes reclaim in the nodefs filesystem. + // +optional + optional k8s.io.apimachinery.pkg.api.resource.Quantity nodeFSInodesFree = 5; +} + +// KubeletConfigEvictionSoftGracePeriod contains grace periods for kubelet eviction thresholds. +message KubeletConfigEvictionSoftGracePeriod { + // MemoryAvailable is the grace period for the MemoryAvailable eviction threshold. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration memoryAvailable = 1; + + // ImageFSAvailable is the grace period for the ImageFSAvailable eviction threshold. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration imageFSAvailable = 2; + + // ImageFSInodesFree is the grace period for the ImageFSInodesFree eviction threshold. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration imageFSInodesFree = 3; + + // NodeFSAvailable is the grace period for the NodeFSAvailable eviction threshold. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration nodeFSAvailable = 4; + + // NodeFSInodesFree is the grace period for the NodeFSInodesFree eviction threshold. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration nodeFSInodesFree = 5; +} + +// KubeletConfigReserved contains reserved resources for daemons +message KubeletConfigReserved { + // CPU is the reserved cpu. + // +optional + optional k8s.io.apimachinery.pkg.api.resource.Quantity cpu = 1; + + // Memory is the reserved memory. + // +optional + optional k8s.io.apimachinery.pkg.api.resource.Quantity memory = 2; + + // EphemeralStorage is the reserved ephemeral-storage. + // +optional + optional k8s.io.apimachinery.pkg.api.resource.Quantity ephemeralStorage = 3; + + // PID is the reserved process-ids. + // To reserve PID, the SupportNodePidsLimit feature gate must be enabled in Kubernetes versions < 1.15. + // +optional + optional k8s.io.apimachinery.pkg.api.resource.Quantity pid = 4; +} + +// Kubernetes contains the version and configuration variables for the Shoot control plane. +message Kubernetes { + // AllowPrivilegedContainers indicates whether privileged containers are allowed in the Shoot (default: true). + // +optional + optional bool allowPrivilegedContainers = 1; + + // ClusterAutoscaler contains the configuration flags for the Kubernetes cluster autoscaler. + // +optional + optional ClusterAutoscaler clusterAutoscaler = 2; + + // KubeAPIServer contains configuration settings for the kube-apiserver. + // +optional + optional KubeAPIServerConfig kubeAPIServer = 3; + + // KubeControllerManager contains configuration settings for the kube-controller-manager. + // +optional + optional KubeControllerManagerConfig kubeControllerManager = 4; + + // KubeScheduler contains configuration settings for the kube-scheduler. 
+ // +optional + optional KubeSchedulerConfig kubeScheduler = 5; + + // KubeProxy contains configuration settings for the kube-proxy. + // +optional + optional KubeProxyConfig kubeProxy = 6; + + // Kubelet contains configuration settings for the kubelet. + // +optional + optional KubeletConfig kubelet = 7; + + // Version is the semantic Kubernetes version to use for the Shoot cluster. + optional string version = 8; + + // VerticalPodAutoscaler contains the configuration flags for the Kubernetes vertical pod autoscaler. + // +optional + optional VerticalPodAutoscaler verticalPodAutoscaler = 9; +} + +// KubernetesConfig contains common configuration fields for the control plane components. +message KubernetesConfig { + // FeatureGates contains information about enabled feature gates. + // +optional + map featureGates = 1; +} + +// KubernetesDashboard describes configuration values for the kubernetes-dashboard addon. +message KubernetesDashboard { + optional Addon addon = 2; + + // AuthenticationMode defines the authentication mode for the kubernetes-dashboard. + // +optional + optional string authenticationMode = 1; +} + +// KubernetesInfo contains the version and configuration variables for the Plant cluster. +message KubernetesInfo { + // Version is the semantic Kubernetes version to use for the Plant cluster. + optional string version = 1; +} + +// KubernetesSettings contains constraints regarding allowed values of the 'kubernetes' block in the Shoot specification. +message KubernetesSettings { + // Versions is the list of allowed Kubernetes versions with optional expiration dates for Shoot clusters. + // +patchMergeKey=version + // +patchStrategy=merge + // +optional + repeated ExpirableVersion versions = 1; +} + +// LastError indicates the last occurred error for an operation on a resource. +message LastError { + // A human readable message indicating details about the last error. + optional string description = 1; + + // ID of the task which caused this last error + // +optional + optional string taskID = 2; + + // Well-defined error codes of the last error(s). + // +optional + repeated string codes = 3; + + // Last time the error was reported + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 4; +} + +// LastOperation indicates the type and the state of the last operation, along with a description +// message and a progress indicator. +message LastOperation { + // A human readable message indicating details about the last operation. + optional string description = 1; + + // Last time the operation state transitioned from one to another. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 2; + + // The progress in percentage (0-100) of the last operation. + optional int32 progress = 3; + + // Status of the last operation, one of Aborted, Processing, Succeeded, Error, Failed. + optional string state = 4; + + // Type of the last operation, one of Create, Reconcile, Delete. + optional string type = 5; +} + +// Machine contains information about the machine type and image. +message Machine { + // Type is the machine type of the worker group. + optional string type = 1; + + // Image holds information about the machine image to use for all nodes of this pool. It will default to the + // latest version of the first image stated in the referenced CloudProfile if no value has been provided. + // +optional + optional ShootMachineImage image = 2; +} + +// MachineControllerManagerSettings contains configurations for different worker-pools. Eg. 
MachineDrainTimeout, MachineHealthTimeout. +message MachineControllerManagerSettings { + // MachineDrainTimeout is the period after which machine is forcefully deleted. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration machineDrainTimeout = 1; + + // MachineHealthTimeout is the period after which machine is declared failed. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration machineHealthTimeout = 2; + + // MachineCreationTimeout is the period after which creation of the machine is declared failed. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration machineCreationTimeout = 3; + + // MaxEvictRetries are the number of eviction retries on a pod after which drain is declared failed, and forceful deletion is triggered. + // +optional + optional int32 maxEvictRetries = 4; + + // NodeConditions are the set of conditions if set to true for the period of MachineHealthTimeout, machine will be declared failed. + // +optional + repeated string nodeConditions = 5; +} + +// MachineImage defines the name and multiple versions of the machine image in any environment. +message MachineImage { + // Name is the name of the image. + optional string name = 1; + + // Versions contains versions, expiration dates and container runtimes of the machine image + // +patchMergeKey=version + // +patchStrategy=merge + repeated MachineImageVersion versions = 2; +} + +// MachineImageVersion is an expirable version with list of supported container runtimes and interfaces +message MachineImageVersion { + optional ExpirableVersion expirableVersion = 1; + + // CRI list of supported container runtime and interfaces supported by this version + // +optional + repeated CRI cri = 2; +} + +// MachineType contains certain properties of a machine type. +message MachineType { + // CPU is the number of CPUs for this machine type. + optional k8s.io.apimachinery.pkg.api.resource.Quantity cpu = 1; + + // GPU is the number of GPUs for this machine type. + optional k8s.io.apimachinery.pkg.api.resource.Quantity gpu = 2; + + // Memory is the amount of memory for this machine type. + optional k8s.io.apimachinery.pkg.api.resource.Quantity memory = 3; + + // Name is the name of the machine type. + optional string name = 4; + + // Storage is the amount of storage associated with the root volume of this machine type. + // +optional + optional MachineTypeStorage storage = 5; + + // Usable defines if the machine type can be used for shoot clusters. + // +optional + optional bool usable = 6; +} + +// MachineTypeStorage is the amount of storage associated with the root volume of this machine type. +message MachineTypeStorage { + // Class is the class of the storage type. + optional string class = 1; + + // StorageSize is the storage size. + optional k8s.io.apimachinery.pkg.api.resource.Quantity size = 2; + + // Type is the type of the storage. + optional string type = 3; +} + +// Maintenance contains information about the time window for maintenance operations and which +// operations should be performed. +message Maintenance { + // AutoUpdate contains information about which constraints should be automatically updated. + // +optional + optional MaintenanceAutoUpdate autoUpdate = 1; + + // TimeWindow contains information about the time window for maintenance operations. + // +optional + optional MaintenanceTimeWindow timeWindow = 2; + + // ConfineSpecUpdateRollout prevents that changes/updates to the shoot specification will be rolled out immediately. 
+ // Instead, they are rolled out during the shoot's maintenance time window. There is one exception that will trigger + // an immediate roll out which is changes to the Spec.Hibernation.Enabled field. + // +optional + optional bool confineSpecUpdateRollout = 3; +} + +// MaintenanceAutoUpdate contains information about which constraints should be automatically updated. +message MaintenanceAutoUpdate { + // KubernetesVersion indicates whether the patch Kubernetes version may be automatically updated (default: true). + optional bool kubernetesVersion = 1; + + // MachineImageVersion indicates whether the machine image version may be automatically updated (default: true). + optional bool machineImageVersion = 2; +} + +// MaintenanceTimeWindow contains information about the time window for maintenance operations. +message MaintenanceTimeWindow { + // Begin is the beginning of the time window in the format HHMMSS+ZONE, e.g. "220000+0100". + // If not present, a random value will be computed. + optional string begin = 1; + + // End is the end of the time window in the format HHMMSS+ZONE, e.g. "220000+0100". + // If not present, the value will be computed based on the "Begin" value. + optional string end = 2; +} + +// Monitoring contains information about the monitoring configuration for the shoot. +message Monitoring { + // Alerting contains information about the alerting configuration for the shoot cluster. + // +optional + optional Alerting alerting = 1; +} + +// NamedResourceReference is a named reference to a resource. +message NamedResourceReference { + // Name of the resource reference. + optional string name = 1; + + // ResourceRef is a reference to a resource. + optional k8s.io.api.autoscaling.v1.CrossVersionObjectReference resourceRef = 2; +} + +// Networking defines networking parameters for the shoot cluster. +message Networking { + // Type identifies the type of the networking plugin. + optional string type = 1; + + // ProviderConfig is the configuration passed to network resource. + // +optional + optional k8s.io.apimachinery.pkg.runtime.RawExtension providerConfig = 2; + + // Pods is the CIDR of the pod network. + // +optional + optional string pods = 3; + + // Nodes is the CIDR of the entire node network. + // +optional + optional string nodes = 4; + + // Services is the CIDR of the service network. + // +optional + optional string services = 5; +} + +// NginxIngress describes configuration values for the nginx-ingress addon. +message NginxIngress { + optional Addon addon = 4; + + // LoadBalancerSourceRanges is list of allowed IP sources for NginxIngress + // +optional + repeated string loadBalancerSourceRanges = 1; + + // Config contains custom configuration for the nginx-ingress-controller configuration. + // See https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/configmap.md#configuration-options + // +optional + map config = 2; + + // ExternalTrafficPolicy controls the `.spec.externalTrafficPolicy` value of the load balancer `Service` + // exposing the nginx-ingress. Defaults to `Cluster`. + // +optional + optional string externalTrafficPolicy = 3; +} + +// OIDCConfig contains configuration settings for the OIDC provider. +// Note: Descriptions were taken from the Kubernetes documentation. +message OIDCConfig { + // If set, the OpenID server's certificate will be verified by one of the authorities in the oidc-ca-file, otherwise the host's root CA set will be used. 
+ // +optional
+ optional string caBundle = 1;
+
+ // ClientAuthentication can optionally contain client configuration used for kubeconfig generation.
+ // +optional
+ optional OpenIDConnectClientAuthentication clientAuthentication = 2;
+
+ // The client ID for the OpenID Connect client, must be set if oidc-issuer-url is set.
+ // +optional
+ optional string clientID = 3;
+
+ // If provided, the name of a custom OpenID Connect claim for specifying user groups. The claim value is expected to be a string or array of strings. This flag is experimental, please see the authentication documentation for further details.
+ // +optional
+ optional string groupsClaim = 4;
+
+ // If provided, all groups will be prefixed with this value to prevent conflicts with other authentication strategies.
+ // +optional
+ optional string groupsPrefix = 5;
+
+ // The URL of the OpenID issuer, only HTTPS scheme will be accepted. If set, it will be used to verify the OIDC JSON Web Token (JWT).
+ // +optional
+ optional string issuerURL = 6;
+
+ // ATTENTION: Only meaningful for Kubernetes >= 1.11
+ // key=value pairs that describes a required claim in the ID Token. If set, the claim is verified to be present in the ID Token with a matching value.
+ // +optional
+ map<string, string> requiredClaims = 7;
+
+ // List of allowed JOSE asymmetric signing algorithms. JWTs with a 'alg' header value not in this list will be rejected. Values are defined by RFC 7518 https://tools.ietf.org/html/rfc7518#section-3.1
+ // +optional
+ repeated string signingAlgs = 8;
+
+ // The OpenID claim to use as the user name. Note that claims other than the default ('sub') is not guaranteed to be unique and immutable. This flag is experimental, please see the authentication documentation for further details. (default "sub")
+ // +optional
+ optional string usernameClaim = 9;
+
+ // If provided, all usernames will be prefixed with this value. If not provided, username claims other than 'email' are prefixed by the issuer URL to avoid clashes. To skip any prefixing, provide the value '-'.
+ // +optional
+ optional string usernamePrefix = 10;
+}
+
+// OpenIDConnectClientAuthentication contains configuration for OIDC clients.
+message OpenIDConnectClientAuthentication {
+ // Extra configuration added to kubeconfig's auth-provider.
+ // Must not be any of idp-issuer-url, client-id, client-secret, idp-certificate-authority, idp-certificate-authority-data, id-token or refresh-token
+ // +optional
+ map<string, string> extraConfig = 1;
+
+ // The client Secret for the OpenID Connect client.
+ // +optional
+ optional string secret = 2;
+}
+
+message Plant {
+ // Standard object metadata.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Spec contains the specification of this Plant.
+ optional PlantSpec spec = 2;
+
+ // Status contains the status of this Plant.
+ optional PlantStatus status = 3;
+}
+
+// PlantList is a collection of Plants.
+message PlantList {
+ // Standard list object metadata.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // Items is the list of Plants.
+ repeated Plant items = 2;
+}
+
+// PlantSpec is the specification of a Plant.
+message PlantSpec {
+ // SecretRef is a reference to a Secret object containing the Kubeconfig of the external kubernetes
+ // clusters to be added to Gardener.
+ optional k8s.io.api.core.v1.LocalObjectReference secretRef = 1; + + // Endpoints is the configuration plant endpoints + // +patchMergeKey=name + // +patchStrategy=merge + // +optional + repeated Endpoint endpoints = 2; +} + +// PlantStatus is the status of a Plant. +message PlantStatus { + // Conditions represents the latest available observations of a Plant's current state. + // +patchMergeKey=type + // +patchStrategy=merge + // +optional + repeated Condition conditions = 1; + + // ObservedGeneration is the most recent generation observed for this Plant. It corresponds to the + // Plant's generation, which is updated on mutation by the API Server. + // +optional + optional int64 observedGeneration = 2; + + // ClusterInfo is additional computed information about the newly added cluster (Plant) + optional ClusterInfo clusterInfo = 3; +} + +// Project holds certain properties about a Gardener project. +message Project { + // Standard object metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec defines the project properties. + // +optional + optional ProjectSpec spec = 2; + + // Most recently observed status of the Project. + // +optional + optional ProjectStatus status = 3; +} + +// ProjectList is a collection of Projects. +message ProjectList { + // Standard list object metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of Projects. + repeated Project items = 2; +} + +// ProjectMember is a member of a project. +message ProjectMember { + // Subject is representing a user name, an email address, or any other identifier of a user, group, or service + // account that has a certain role. + optional k8s.io.api.rbac.v1.Subject subject = 1; + + // Role represents the role of this member. + // IMPORTANT: Be aware that this field will be removed in the `v1` version of this API in favor of the `roles` + // list. + // TODO: Remove this field in favor of the `owner` role in `v1`. + optional string role = 2; + + // Roles represents the list of roles of this member. + // +optional + repeated string roles = 3; +} + +// ProjectSpec is the specification of a Project. +message ProjectSpec { + // CreatedBy is a subject representing a user name, an email address, or any other identifier of a user + // who created the project. + // +optional + optional k8s.io.api.rbac.v1.Subject createdBy = 1; + + // Description is a human-readable description of what the project is used for. + // +optional + optional string description = 2; + + // Owner is a subject representing a user name, an email address, or any other identifier of a user owning + // the project. + // IMPORTANT: Be aware that this field will be removed in the `v1` version of this API in favor of the `owner` + // role. The only way to change the owner will be by moving the `owner` role. In this API version the only way + // to change the owner is to use this field. + // +optional + // TODO: Remove this field in favor of the `owner` role in `v1`. + optional k8s.io.api.rbac.v1.Subject owner = 3; + + // Purpose is a human-readable explanation of the project's purpose. + // +optional + optional string purpose = 4; + + // Members is a list of subjects representing a user name, an email address, or any other identifier of a user, + // group, or service account that has a certain role. + // +optional + repeated ProjectMember members = 5; + + // Namespace is the name of the namespace that has been created for the Project object. 
+ // A nil value means that Gardener will determine the name of the namespace. + // +optional + optional string namespace = 6; + + // Tolerations contains the default tolerations and a whitelist for taints on seed clusters. + // +optional + optional ProjectTolerations tolerations = 7; +} + +// ProjectStatus holds the most recently observed status of the project. +message ProjectStatus { + // ObservedGeneration is the most recent generation observed for this project. + // +optional + optional int64 observedGeneration = 1; + + // Phase is the current phase of the project. + optional string phase = 2; + + // StaleSinceTimestamp contains the timestamp when the project was first discovered to be stale/unused. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time staleSinceTimestamp = 3; + + // StaleAutoDeleteTimestamp contains the timestamp when the project will be garbage-collected/automatically deleted + // because it's stale/unused. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time staleAutoDeleteTimestamp = 4; +} + +// ProjectTolerations contains the tolerations for taints on seed clusters. +message ProjectTolerations { + // Defaults contains a list of tolerations that are added to the shoots in this project by default. + // +patchMergeKey=key + // +patchStrategy=merge + // +optional + repeated Toleration defaults = 1; + + // Whitelist contains a list of tolerations that are allowed to be added to the shoots in this project. Please note + // that this list may only be added by users having the `spec-tolerations-whitelist` verb for project resources. + // +patchMergeKey=key + // +patchStrategy=merge + // +optional + repeated Toleration whitelist = 2; +} + +// Provider contains provider-specific information that are handed-over to the provider-specific +// extension controller. +message Provider { + // Type is the type of the provider. + optional string type = 1; + + // ControlPlaneConfig contains the provider-specific control plane config blob. Please look up the concrete + // definition in the documentation of your provider extension. + // +optional + optional k8s.io.apimachinery.pkg.runtime.RawExtension controlPlaneConfig = 2; + + // InfrastructureConfig contains the provider-specific infrastructure config blob. Please look up the concrete + // definition in the documentation of your provider extension. + // +optional + optional k8s.io.apimachinery.pkg.runtime.RawExtension infrastructureConfig = 3; + + // Workers is a list of worker groups. + // +patchMergeKey=name + // +patchStrategy=merge + repeated Worker workers = 4; +} + +message Quota { + // Standard object metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec defines the Quota constraints. + // +optional + optional QuotaSpec spec = 2; +} + +// QuotaList is a collection of Quotas. +message QuotaList { + // Standard list object metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of Quotas. + repeated Quota items = 2; +} + +// QuotaSpec is the specification of a Quota. +message QuotaSpec { + // ClusterLifetimeDays is the lifetime of a Shoot cluster in days before it will be terminated automatically. + // +optional + optional int32 clusterLifetimeDays = 1; + + // Metrics is a list of resources which will be put under constraints. + map metrics = 2; + + // Scope is the scope of the Quota object, either 'project' or 'secret'. 
+ optional k8s.io.api.core.v1.ObjectReference scope = 3; +} + +// Region contains certain properties of a region. +message Region { + // Name is a region name. + optional string name = 1; + + // Zones is a list of availability zones in this region. + // +patchMergeKey=name + // +patchStrategy=merge + // +optional + repeated AvailabilityZone zones = 2; + + // Labels is an optional set of key-value pairs that contain certain administrator-controlled labels for this region. + // It can be used by Gardener administrators/operators to provide additional information about a region, e.g. wrt + // quality, reliability, access restrictions, etc. + // +optional + map labels = 3; +} + +// ResourceData holds the data of a resource referred to by an extension controller state. +message ResourceData { + optional k8s.io.api.autoscaling.v1.CrossVersionObjectReference ref = 1; + + // Data of the resource + optional k8s.io.apimachinery.pkg.runtime.RawExtension data = 2; +} + +// ResourceWatchCacheSize contains configuration of the API server's watch cache size for one specific resource. +message ResourceWatchCacheSize { + // APIGroup is the API group of the resource for which the watch cache size should be configured. + // An unset value is used to specify the legacy core API (e.g. for `secrets`). + // +optional + optional string apiGroup = 1; + + // Resource is the name of the resource for which the watch cache size should be configured + // (in lowercase plural form, e.g. `secrets`). + optional string resource = 2; + + // CacheSize specifies the watch cache size that should be configured for the specified resource. + optional int32 size = 3; +} + +message SecretBinding { + // Standard object metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // SecretRef is a reference to a secret object in the same or another namespace. + optional k8s.io.api.core.v1.SecretReference secretRef = 2; + + // Quotas is a list of references to Quota objects in the same or another namespace. + // +optional + repeated k8s.io.api.core.v1.ObjectReference quotas = 3; +} + +// SecretBindingList is a collection of SecretBindings. +message SecretBindingList { + // Standard list object metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of SecretBindings. + repeated SecretBinding items = 2; +} + +// Seed represents an installation request for an external controller. +message Seed { + // Standard object metadata. + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec contains the specification of this installation. + optional SeedSpec spec = 2; + + // Status contains the status of this installation. + optional SeedStatus status = 3; +} + +// SeedBackup contains the object store configuration for backups for shoot (currently only etcd). +message SeedBackup { + // Provider is a provider name. + optional string provider = 1; + + // ProviderConfig is the configuration passed to BackupBucket resource. + // +optional + optional k8s.io.apimachinery.pkg.runtime.RawExtension providerConfig = 2; + + // Region is a region name. + // +optional + optional string region = 3; + + // SecretRef is a reference to a Secret object containing the cloud provider credentials for + // the object store where backups should be stored. It should have enough privileges to manipulate + // the objects as well as buckets. 
+ optional k8s.io.api.core.v1.SecretReference secretRef = 4; +} + +// SeedDNS contains DNS-relevant information about this seed cluster. +message SeedDNS { + // IngressDomain is the domain of the Seed cluster pointing to the ingress controller endpoint. It will be used + // to construct ingress URLs for system applications running in Shoot clusters. Once set this field is immutable. + // This will be removed in the next API version and replaced by spec.ingress.domain. + // +optional + optional string ingressDomain = 1; + + // Provider configures a DNSProvider + // +optional + optional SeedDNSProvider provider = 2; +} + +// SeedDNSProvider configures a DNSProvider +message SeedDNSProvider { + // Type describes the type of the dns-provider, for example `aws-route53` + optional string type = 1; + + // SecretRef is a reference to a Secret object containing cloud provider credentials used for registering external domains. + optional k8s.io.api.core.v1.SecretReference secretRef = 2; + + // Domains contains information about which domains shall be included/excluded for this provider. + // +optional + optional DNSIncludeExclude domains = 3; + + // Zones contains information about which hosted zones shall be included/excluded for this provider. + // +optional + optional DNSIncludeExclude zones = 4; +} + +// SeedList is a collection of Seeds. +message SeedList { + // Standard list object metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of Seeds. + repeated Seed items = 2; +} + +// SeedNetworks contains CIDRs for the pod, service and node networks of a Kubernetes cluster. +message SeedNetworks { + // Nodes is the CIDR of the node network. + // +optional + optional string nodes = 1; + + // Pods is the CIDR of the pod network. + optional string pods = 2; + + // Services is the CIDR of the service network. + optional string services = 3; + + // ShootDefaults contains the default networks CIDRs for shoots. + // +optional + optional ShootNetworks shootDefaults = 4; +} + +// SeedProvider defines the provider type and region for this Seed cluster. +message SeedProvider { + // Type is the name of the provider. + optional string type = 1; + + // ProviderConfig is the configuration passed to Seed resource. + // +optional + optional k8s.io.apimachinery.pkg.runtime.RawExtension providerConfig = 2; + + // Region is a name of a region. + optional string region = 3; +} + +// SeedSelector contains constraints for selecting seed to be usable for shoots using a profile +message SeedSelector { + // LabelSelector is optional and can be used to select seeds by their label settings + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labelSelector = 1; + + // Providers is optional and can be used by restricting seeds by their provider type. '*' can be used to enable seeds regardless of their provider type. + // +optional + repeated string providerTypes = 2; +} + +// SeedSettingExcessCapacityReservation controls the excess capacity reservation for shoot control planes in the +// seed. When enabled then this is done via PodPriority and requires the Seed cluster to have Kubernetes version 1.11 +// or the PodPriority feature gate as well as the scheduling.k8s.io/v1alpha1 API group enabled. +message SeedSettingExcessCapacityReservation { + // Enabled controls whether the excess capacity reservation should be enabled. 
+ optional bool enabled = 1;
+}
+
+// SeedSettingLoadBalancerServices controls certain settings for services of type load balancer that are created in the
+// seed.
+message SeedSettingLoadBalancerServices {
+ // Annotations is a map of annotations that will be injected/merged into every load balancer service object.
+ // +optional
+ map<string, string> annotations = 1;
+}
+
+// SeedSettingScheduling controls settings for scheduling decisions for the seed.
+message SeedSettingScheduling {
+ // Visible controls whether the gardener-scheduler shall consider this seed when scheduling shoots. Invisible seeds
+ // are not considered by the scheduler.
+ optional bool visible = 1;
+}
+
+// SeedSettingShootDNS controls the shoot DNS settings for the seed.
+message SeedSettingShootDNS {
+ // Enabled controls whether the DNS for shoot clusters should be enabled. When disabled then all shoots using the
+ // seed won't get any DNS providers, DNS records, and no DNS extension controller is required to be installed here.
+ // This is useful for environments where DNS is not required.
+ optional bool enabled = 1;
+}
+
+// SeedSettingVerticalPodAutoscaler controls certain settings for the vertical pod autoscaler components deployed in the
+// seed.
+message SeedSettingVerticalPodAutoscaler {
+ // Enabled controls whether the VPA components shall be deployed into the garden namespace in the seed cluster. It
+ // is enabled by default because Gardener heavily relies on a VPA being deployed. You should only disable this if
+ // your seed cluster already has another, manually/custom managed VPA deployment.
+ optional bool enabled = 1;
+}
+
+// SeedSettings contains certain settings for this seed cluster.
+message SeedSettings {
+ // ExcessCapacityReservation controls the excess capacity reservation for shoot control planes in the seed.
+ // +optional
+ optional SeedSettingExcessCapacityReservation excessCapacityReservation = 1;
+
+ // Scheduling controls settings for scheduling decisions for the seed.
+ // +optional
+ optional SeedSettingScheduling scheduling = 2;
+
+ // ShootDNS controls the shoot DNS settings for the seed.
+ // +optional
+ optional SeedSettingShootDNS shootDNS = 3;
+
+ // LoadBalancerServices controls certain settings for services of type load balancer that are created in the
+ // seed.
+ // +optional
+ optional SeedSettingLoadBalancerServices loadBalancerServices = 4;
+
+ // VerticalPodAutoscaler controls certain settings for the vertical pod autoscaler components deployed in the seed.
+ // +optional
+ optional SeedSettingVerticalPodAutoscaler verticalPodAutoscaler = 5;
+}
+
+// SeedSpec is the specification of a Seed.
+message SeedSpec {
+ // Backup holds the object store configuration for the backups of shoot (currently only etcd).
+ // If it is not specified, then there won't be any backups taken for shoots associated with this seed.
+ // If backup field is present in seed, then backups of the etcd from shoot control plane will be stored
+ // under the configured object store.
+ // +optional
+ optional SeedBackup backup = 1;
+
+ // BlockCIDRs is a list of network addresses that should be blocked for shoot control plane components running
+ // in the seed cluster.
+ // +optional
+ repeated string blockCIDRs = 2;
+
+ // DNS contains DNS-relevant information about this seed cluster.
+ optional SeedDNS dns = 3;
+
+ // Networks defines the pod, service and worker network of the Seed cluster.
+ optional SeedNetworks networks = 4;
+
+ // Provider defines the provider type and region for this Seed cluster.
+ optional SeedProvider provider = 5; + + // SecretRef is a reference to a Secret object containing the Kubeconfig and the cloud provider credentials for + // the account the Seed cluster has been deployed to. + // +optional + optional k8s.io.api.core.v1.SecretReference secretRef = 6; + + // Taints describes taints on the seed. + // +optional + repeated SeedTaint taints = 7; + + // Volume contains settings for persistentvolumes created in the seed cluster. + // +optional + optional SeedVolume volume = 8; + + // Settings contains certain settings for this seed cluster. + // +optional + optional SeedSettings settings = 9; + + // Ingress configures Ingress specific settings of the Seed cluster. + // +optional + optional Ingress ingress = 10; +} + +// SeedStatus is the status of a Seed. +message SeedStatus { + // Conditions represents the latest available observations of a Seed's current state. + // +patchMergeKey=type + // +patchStrategy=merge + // +optional + repeated Condition conditions = 1; + + // Gardener holds information about the Gardener instance which last acted on the Seed. + // +optional + optional Gardener gardener = 2; + + // KubernetesVersion is the Kubernetes version of the seed cluster. + // +optional + optional string kubernetesVersion = 3; + + // ObservedGeneration is the most recent generation observed for this Seed. It corresponds to the + // Seed's generation, which is updated on mutation by the API Server. + // +optional + optional int64 observedGeneration = 4; + + // ClusterIdentity is the identity of the Seed cluster + // +optional + optional string clusterIdentity = 5; +} + +// SeedTaint describes a taint on a seed. +message SeedTaint { + // Key is the taint key to be applied to a seed. + optional string key = 1; + + // Value is the taint value corresponding to the taint key. + // +optional + optional string value = 2; +} + +// SeedVolume contains settings for persistentvolumes created in the seed cluster. +message SeedVolume { + // MinimumSize defines the minimum size that should be used for PVCs in the seed. + // +optional + optional k8s.io.apimachinery.pkg.api.resource.Quantity minimumSize = 1; + + // Providers is a list of storage class provisioner types for the seed. + // +patchMergeKey=name + // +patchStrategy=merge + // +optional + repeated SeedVolumeProvider providers = 2; +} + +// SeedVolumeProvider is a storage class provisioner type. +message SeedVolumeProvider { + // Purpose is the purpose of this provider. + optional string purpose = 1; + + // Name is the name of the storage class provisioner type. + optional string name = 2; +} + +// ServiceAccountConfig is the kube-apiserver configuration for service accounts. +message ServiceAccountConfig { + // Issuer is the identifier of the service account token issuer. The issuer will assert this + // identifier in "iss" claim of issued tokens. This value is a string or URI. + // Defaults to URI of the API server. + // +optional + optional string issuer = 1; + + // SigningKeySecret is a reference to a secret that contains an optional private key of the + // service account token issuer. The issuer will sign issued ID tokens with this private key. + // Only useful if service account tokens are also issued by another external system. + // +optional + optional k8s.io.api.core.v1.LocalObjectReference signingKeySecretName = 2; +} + +message Shoot { + // Standard object metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the Shoot cluster. 
+ // +optional + optional ShootSpec spec = 2; + + // Most recently observed status of the Shoot cluster. + // +optional + optional ShootStatus status = 3; +} + +// ShootList is a list of Shoot objects. +message ShootList { + // Standard list object metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of Shoots. + repeated Shoot items = 2; +} + +// ShootMachineImage defines the name and the version of the shoot's machine image in any environment. Has to be +// defined in the respective CloudProfile. +message ShootMachineImage { + // Name is the name of the image. + optional string name = 1; + + // ProviderConfig is the shoot's individual configuration passed to an extension resource. + // +optional + optional k8s.io.apimachinery.pkg.runtime.RawExtension providerConfig = 2; + + // Version is the version of the shoot's image. + // If version is not provided, it will be defaulted to the latest version from the CloudProfile. + // +optional + optional string version = 3; +} + +// ShootNetworks contains the default networks CIDRs for shoots. +message ShootNetworks { + // Pods is the CIDR of the pod network. + // +optional + optional string pods = 1; + + // Services is the CIDR of the service network. + // +optional + optional string services = 2; +} + +// ShootSpec is the specification of a Shoot. +message ShootSpec { + // Addons contains information about enabled/disabled addons and their configuration. + // +optional + optional Addons addons = 1; + + // CloudProfileName is a name of a CloudProfile object. + optional string cloudProfileName = 2; + + // DNS contains information about the DNS settings of the Shoot. + // +optional + optional DNS dns = 3; + + // Extensions contain type and provider information for Shoot extensions. + // +optional + repeated Extension extensions = 4; + + // Hibernation contains information whether the Shoot is suspended or not. + // +optional + optional Hibernation hibernation = 5; + + // Kubernetes contains the version and configuration settings of the control plane components. + optional Kubernetes kubernetes = 6; + + // Networking contains information about cluster networking such as CNI Plugin type, CIDRs, ...etc. + optional Networking networking = 7; + + // Maintenance contains information about the time window for maintenance operations and which + // operations should be performed. + // +optional + optional Maintenance maintenance = 8; + + // Monitoring contains information about custom monitoring configurations for the shoot. + // +optional + optional Monitoring monitoring = 9; + + // Provider contains all provider-specific and provider-relevant information. + optional Provider provider = 10; + + // Purpose is the purpose class for this cluster. + // +optional + optional string purpose = 11; + + // Region is a name of a region. + optional string region = 12; + + // SecretBindingName is the name of the a SecretBinding that has a reference to the provider secret. + // The credentials inside the provider secret will be used to create the shoot in the respective account. + optional string secretBindingName = 13; + + // SeedName is the name of the seed cluster that runs the control plane of the Shoot. + // +optional + optional string seedName = 14; + + // SeedSelector is an optional selector which must match a seed's labels for the shoot to be scheduled on that seed. 
+ // +optional + optional SeedSelector seedSelector = 15; + + // Resources holds a list of named resource references that can be referred to in extension configs by their names. + // +optional + repeated NamedResourceReference resources = 16; + + // Tolerations contains the tolerations for taints on seed clusters. + // +patchMergeKey=key + // +patchStrategy=merge + // +optional + repeated Toleration tolerations = 17; +} + +// ShootState contains a snapshot of the Shoot's state required to migrate the Shoot's control plane to a new Seed. +message ShootState { + // Standard object metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the ShootState. + // +optional + optional ShootStateSpec spec = 2; +} + +// ShootStateList is a list of ShootState objects. +message ShootStateList { + // Standard list object metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of ShootStates. + repeated ShootState items = 2; +} + +// ShootStateSpec is the specification of the ShootState. +message ShootStateSpec { + // Gardener holds the data required to generate resources deployed by the gardenlet + // +patchMergeKey=name + // +patchStrategy=merge + // +optional + repeated GardenerResourceData gardener = 1; + + // Extensions holds the state of custom resources reconciled by extension controllers in the seed + // +optional + repeated ExtensionResourceState extensions = 2; + + // Resources holds the data of resources referred to by extension controller states + // +optional + repeated ResourceData resources = 3; +} + +// ShootStatus holds the most recently observed status of the Shoot cluster. +message ShootStatus { + // Conditions represents the latest available observations of a Shoots's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + repeated Condition conditions = 1; + + // Constraints represents conditions of a Shoot's current state that constraint some operations on it. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + repeated Condition constraints = 2; + + // Gardener holds information about the Gardener which last acted on the Shoot. + optional Gardener gardener = 3; + + // IsHibernated indicates whether the Shoot is currently hibernated. + optional bool hibernated = 4; + + // LastOperation holds information about the last operation on the Shoot. + // +optional + optional LastOperation lastOperation = 5; + + // LastError holds information about the last occurred error during an operation. + // +optional + optional LastError lastError = 6; + + // LastErrors holds information about the last occurred error(s) during an operation. + // +optional + repeated LastError lastErrors = 7; + + // ObservedGeneration is the most recent generation observed for this Shoot. It corresponds to the + // Shoot's generation, which is updated on mutation by the API Server. + // +optional + optional int64 observedGeneration = 8; + + // RetryCycleStartTime is the start time of the last retry cycle (used to determine how often an operation + // must be retried until we give up). + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time retryCycleStartTime = 9; + + // Seed is the name of the seed cluster that runs the control plane of the Shoot. This value is only written + // after a successful create/reconcile operation. It will be used when control planes are moved between Seeds. 
+ // +optional
+ optional string seed = 10;
+
+ // TechnicalID is the name that is used for creating the Seed namespace, the infrastructure resources, and
+ // basically everything that is related to this particular Shoot.
+ optional string technicalID = 11;
+
+ // UID is a unique identifier for the Shoot cluster to avoid portability between Kubernetes clusters.
+ // It is used to compute unique hashes.
+ optional string uid = 12;
+
+ // ClusterIdentity is the identity of the Shoot cluster
+ // +optional
+ optional string clusterIdentity = 13;
+}
+
+// Toleration is a toleration for a seed taint.
+message Toleration {
+ // Key is the toleration key to be applied to a project or shoot.
+ optional string key = 1;
+
+ // Value is the toleration value corresponding to the toleration key.
+ // +optional
+ optional string value = 2;
+}
+
+// VerticalPodAutoscaler contains the configuration flags for the Kubernetes vertical pod autoscaler.
+message VerticalPodAutoscaler {
+ // Enabled specifies whether the Kubernetes VPA shall be enabled for the shoot cluster.
+ optional bool enabled = 1;
+
+ // EvictAfterOOMThreshold defines the threshold that will lead to pod eviction in case it OOMed in less than the given
+ // threshold since its start and if it has only one container (default: 10m0s).
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration evictAfterOOMThreshold = 2;
+
+ // EvictionRateBurst defines the burst of pods that can be evicted (default: 1)
+ // +optional
+ optional int32 evictionRateBurst = 3;
+
+ // EvictionRateLimit defines the number of pods that can be evicted per second. A rate limit set to 0 or -1 will
+ // disable the rate limiter (default: -1).
+ // +optional
+ optional double evictionRateLimit = 4;
+
+ // EvictionTolerance defines the fraction of replica count that can be evicted for update in case more than one
+ // pod can be evicted (default: 0.5).
+ // +optional
+ optional double evictionTolerance = 5;
+
+ // RecommendationMarginFraction is the fraction of usage added as the safety margin to the recommended request
+ // (default: 0.15).
+ // +optional
+ optional double recommendationMarginFraction = 6;
+
+ // UpdaterInterval is the interval how often the updater should run (default: 1m0s).
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration updaterInterval = 7;
+
+ // RecommenderInterval is the interval how often metrics should be fetched (default: 1m0s).
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration recommenderInterval = 8;
+}
+
+// Volume contains information about the volume type and size.
+message Volume {
+ // Name of the volume to make it referencable.
+ // +optional
+ optional string name = 1;
+
+ // Type is the type of the volume.
+ // +optional
+ optional string type = 2;
+
+ // Size is the size of the volume.
+ optional string size = 3;
+
+ // Encrypted determines if the volume should be encrypted.
+ // +optional
+ optional bool encrypted = 4;
+}
+
+// VolumeType contains certain properties of a volume type.
+message VolumeType {
+ // Class is the class of the volume type.
+ optional string class = 1;
+
+ // Name is the name of the volume type.
+ optional string name = 2;
+
+ // Usable defines if the volume type can be used for shoot clusters.
+ // +optional
+ optional bool usable = 3;
+}
+
+// WatchCacheSizes contains configuration of the API server's watch cache sizes.
+message WatchCacheSizes {
+ // Default configures the default watch cache size of the kube-apiserver
+ // (flag `--default-watch-cache-size`, defaults to 100).
+ // See: https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/
+ // +optional
+ optional int32 default = 1;
+
+ // Resources configures the watch cache size of the kube-apiserver per resource
+ // (flag `--watch-cache-sizes`).
+ // See: https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/
+ // +optional
+ repeated ResourceWatchCacheSize resources = 2;
+}
+
+// Worker is the base definition of a worker group.
+message Worker {
+ // Annotations is a map of key/value pairs for annotations for all the `Node` objects in this worker pool.
+ // +optional
+ map<string, string> annotations = 1;
+
+ // CABundle is a certificate bundle which will be installed onto every machine of this worker pool.
+ // +optional
+ optional string caBundle = 2;
+
+ // CRI contains configurations of CRI support of every machine in the worker pool
+ // +optional
+ optional CRI cri = 3;
+
+ // Kubernetes contains configuration for Kubernetes components related to this worker pool.
+ // +optional
+ optional WorkerKubernetes kubernetes = 4;
+
+ // Labels is a map of key/value pairs for labels for all the `Node` objects in this worker pool.
+ // +optional
+ map<string, string> labels = 5;
+
+ // Name is the name of the worker group.
+ optional string name = 6;
+
+ // Machine contains information about the machine type and image.
+ optional Machine machine = 7;
+
+ // Maximum is the maximum number of VMs to create.
+ optional int32 maximum = 8;
+
+ // Minimum is the minimum number of VMs to create.
+ optional int32 minimum = 9;
+
+ // MaxSurge is maximum number of VMs that are created during an update.
+ // +optional
+ optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 10;
+
+ // MaxUnavailable is the maximum number of VMs that can be unavailable during an update.
+ // +optional
+ optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 11;
+
+ // ProviderConfig is the provider-specific configuration for this worker pool.
+ // +optional
+ optional k8s.io.apimachinery.pkg.runtime.RawExtension providerConfig = 12;
+
+ // Taints is a list of taints for all the `Node` objects in this worker pool.
+ // +optional
+ repeated k8s.io.api.core.v1.Taint taints = 13;
+
+ // Volume contains information about the volume type and size.
+ // +optional
+ optional Volume volume = 14;
+
+ // DataVolumes contains a list of additional worker volumes.
+ // +optional
+ repeated DataVolume dataVolumes = 15;
+
+ // KubeletDataVolumeName contains the name of a dataVolume that should be used for storing kubelet state.
+ // +optional
+ optional string kubeletDataVolumeName = 16;
+
+ // Zones is a list of availability zones that are used to evenly distribute this worker pool. Optional
+ // as not every provider may support availability zones.
+ // +optional
+ repeated string zones = 17;
+
+ // SystemComponents contains configuration for system components related to this worker pool
+ // +optional
+ optional WorkerSystemComponents systemComponents = 18;
+
+ // MachineControllerManagerSettings contains configurations for different worker-pools. Eg. MachineDrainTimeout, MachineHealthTimeout.
+ // +optional
+ optional MachineControllerManagerSettings machineControllerManager = 19;
+}
+
+// WorkerKubernetes contains configuration for Kubernetes components related to this worker pool.
+message WorkerKubernetes { + // Kubelet contains configuration settings for all kubelets of this worker pool. + // +optional + optional KubeletConfig kubelet = 1; +} + +// WorkerSystemComponents contains configuration for system components related to this worker pool +message WorkerSystemComponents { + // Allow determines whether the pool should be allowed to host system components or not (defaults to true) + optional bool allow = 1; +} + diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/register.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/register.go new file mode 100644 index 000000000..0e9c27c36 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/register.go @@ -0,0 +1,77 @@ +// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the name of the core API group. +const GroupName = "core.gardener.cloud" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind. +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource. +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder is a new Scheme Builder which registers our API. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs, addConversionFuncs) + localSchemeBuilder = &SchemeBuilder + // AddToScheme is a reference to the Scheme Builder's AddToScheme function. + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. 
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &BackupBucket{}, + &BackupBucketList{}, + &BackupEntry{}, + &BackupEntryList{}, + &CloudProfile{}, + &CloudProfileList{}, + &ControllerRegistration{}, + &ControllerRegistrationList{}, + &ControllerInstallation{}, + &ControllerInstallationList{}, + &Plant{}, + &PlantList{}, + &Project{}, + &ProjectList{}, + &Quota{}, + &QuotaList{}, + &SecretBinding{}, + &SecretBindingList{}, + &Seed{}, + &SeedList{}, + &ShootState{}, + &ShootStateList{}, + &Shoot{}, + &ShootList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types.go new file mode 100644 index 000000000..6bd416f72 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types.go @@ -0,0 +1,21 @@ +// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +const ( + // GardenerSeedLeaseNamespace is the namespace in which Gardenlet will report Seeds' + // status using Lease resources for each Seed + GardenerSeedLeaseNamespace = "gardener-system-seed-lease" +) diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_backupbucket.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_backupbucket.go new file mode 100644 index 000000000..7ec6112ee --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_backupbucket.go @@ -0,0 +1,91 @@ +// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// BackupBucket holds details about backup bucket +type BackupBucket struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata. 
+ metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` + // Specification of the Backup Bucket. + Spec BackupBucketSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + // Most recently observed status of the Backup Bucket. + Status BackupBucketStatus `json:"status" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// BackupBucketList is a list of BackupBucket objects. +type BackupBucketList struct { + metav1.TypeMeta `json:",inline"` + // Standard list object metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Items is the list of BackupBucket. + Items []BackupBucket `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// BackupBucketSpec is the specification of a Backup Bucket. +type BackupBucketSpec struct { + // Provider hold the details of cloud provider of the object store. + Provider BackupBucketProvider `json:"provider" protobuf:"bytes,1,opt,name=provider"` + // ProviderConfig is the configuration passed to BackupBucket resource. + // +optional + ProviderConfig *runtime.RawExtension `json:"providerConfig,omitempty" protobuf:"bytes,2,opt,name=providerConfig"` + // SecretRef is a reference to a secret that contains the credentials to access object store. + SecretRef corev1.SecretReference `json:"secretRef" protobuf:"bytes,3,opt,name=secretRef"` + // Seed holds the name of the seed allocated to BackupBucket for running controller. + // +optional + Seed *string `json:"seed,omitempty" protobuf:"bytes,4,opt,name=seed"` +} + +// BackupBucketStatus holds the most recently observed status of the Backup Bucket. +type BackupBucketStatus struct { + // ProviderStatus is the configuration passed to BackupBucket resource. + // +optional + ProviderStatus *runtime.RawExtension `json:"providerStatus,omitempty" protobuf:"bytes,1,opt,name=providerStatus"` + // LastOperation holds information about the last operation on the BackupBucket. + // +optional + LastOperation *LastOperation `json:"lastOperation,omitempty" protobuf:"bytes,2,opt,name=lastOperation"` + // LastError holds information about the last occurred error during an operation. + // +optional + LastError *LastError `json:"lastError,omitempty" protobuf:"bytes,3,opt,name=lastError"` + // ObservedGeneration is the most recent generation observed for this BackupBucket. It corresponds to the + // BackupBucket's generation, which is updated on mutation by the API Server. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,4,opt,name=observedGeneration"` + // GeneratedSecretRef is reference to the secret generated by backup bucket, which + // will have object store specific credentials. + // +optional + GeneratedSecretRef *corev1.SecretReference `json:"generatedSecretRef,omitempty" protobuf:"bytes,5,opt,name=generatedSecretRef"` +} + +// BackupBucketProvider holds the details of cloud provider of the object store. +type BackupBucketProvider struct { + // Type is the type of provider. + Type string `json:"type" protobuf:"bytes,1,opt,name=type"` + // Region is the region of the bucket. 
+ Region string `json:"region" protobuf:"bytes,2,opt,name=region"` +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_backupentry.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_backupentry.go new file mode 100644 index 000000000..3d8efe8c0 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_backupentry.go @@ -0,0 +1,75 @@ +// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + // BackupEntryForceDeletion is a constant for an annotation on a BackupEntry indicating that it should be force deleted. + BackupEntryForceDeletion = "backupentry.core.gardener.cloud/force-deletion" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// BackupEntry holds details about shoot backup. +type BackupEntry struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata. + metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` + // Spec contains the specification of the Backup Entry. + // +optional + Spec BackupEntrySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + // Status contains the most recently observed status of the Backup Entry. + // +optional + Status BackupEntryStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// BackupEntryList is a list of BackupEntry objects. +type BackupEntryList struct { + metav1.TypeMeta `json:",inline"` + // Standard list object metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Items is the list of BackupEntry. + Items []BackupEntry `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// BackupEntrySpec is the specification of a Backup Entry. +type BackupEntrySpec struct { + // BucketName is the name of backup bucket for this Backup Entry. + BucketName string `json:"bucketName" protobuf:"bytes,1,opt,name=bucketName"` + // Seed holds the name of the seed allocated to BackupEntry for running controller. + // +optional + Seed *string `json:"seed,omitempty" protobuf:"bytes,2,opt,name=seed"` +} + +// BackupEntryStatus holds the most recently observed status of the Backup Entry. +type BackupEntryStatus struct { + // LastOperation holds information about the last operation on the BackupEntry. + // +optional + LastOperation *LastOperation `json:"lastOperation,omitempty" protobuf:"bytes,1,opt,name=lastOperation"` + // LastError holds information about the last occurred error during an operation. 
+ // +optional + LastError *LastError `json:"lastError,omitempty" protobuf:"bytes,2,opt,name=lastError"` + // ObservedGeneration is the most recent generation observed for this BackupEntry. It corresponds to the + // BackupEntry's generation, which is updated on mutation by the API Server. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,3,opt,name=observedGeneration"` +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_cloudprofile.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_cloudprofile.go new file mode 100644 index 000000000..2e0b09acc --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_cloudprofile.go @@ -0,0 +1,226 @@ +// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CloudProfile represents certain properties about a provider environment. +type CloudProfile struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Spec defines the provider environment properties. + // +optional + Spec CloudProfileSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CloudProfileList is a collection of CloudProfiles. +type CloudProfileList struct { + metav1.TypeMeta `json:",inline"` + // Standard list object metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Items is the list of CloudProfiles. + Items []CloudProfile `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// CloudProfileSpec is the specification of a CloudProfile. +// It must contain exactly one of its defined keys. +type CloudProfileSpec struct { + // CABundle is a certificate bundle which will be installed onto every host machine of shoot cluster targeting this profile. + // +optional + CABundle *string `json:"caBundle,omitempty" protobuf:"bytes,1,opt,name=caBundle"` + // Kubernetes contains constraints regarding allowed values of the 'kubernetes' block in the Shoot specification. + Kubernetes KubernetesSettings `json:"kubernetes" protobuf:"bytes,2,opt,name=kubernetes"` + // MachineImages contains constraints regarding allowed values for machine images in the Shoot specification. 
+ // +patchMergeKey=name + // +patchStrategy=merge + MachineImages []MachineImage `json:"machineImages" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,3,rep,name=machineImages"` + // MachineTypes contains constraints regarding allowed values for machine types in the 'workers' block in the Shoot specification. + // +patchMergeKey=name + // +patchStrategy=merge + MachineTypes []MachineType `json:"machineTypes" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,4,rep,name=machineTypes"` + // ProviderConfig contains provider-specific configuration for the profile. + // +optional + ProviderConfig *runtime.RawExtension `json:"providerConfig,omitempty" protobuf:"bytes,5,opt,name=providerConfig"` + // Regions contains constraints regarding allowed values for regions and zones. + // +patchMergeKey=name + // +patchStrategy=merge + Regions []Region `json:"regions" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,6,rep,name=regions"` + // SeedSelector contains an optional list of labels on `Seed` resources that marks those seeds whose shoots may use this provider profile. + // An empty list means that all seeds of the same provider type are supported. + // This is useful for environments that are of the same type (like openstack) but may have different "instances"/landscapes. + // Optionally a list of possible providers can be added to enable cross-provider scheduling. By default, the provider + // type of the seed must match the shoot's provider. + // +optional + SeedSelector *SeedSelector `json:"seedSelector,omitempty" protobuf:"bytes,7,opt,name=seedSelector"` + // Type is the name of the provider. + Type string `json:"type" protobuf:"bytes,8,opt,name=type"` + // VolumeTypes contains constraints regarding allowed values for volume types in the 'workers' block in the Shoot specification. + // +patchMergeKey=name + // +patchStrategy=merge + // +optional + VolumeTypes []VolumeType `json:"volumeTypes,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,9,rep,name=volumeTypes"` +} + +// SeedSelector contains constraints for selecting seed to be usable for shoots using a profile +type SeedSelector struct { + // LabelSelector is optional and can be used to select seeds by their label settings + // +optional + *metav1.LabelSelector `json:",inline,omitempty" protobuf:"bytes,1,opt,name=labelSelector"` + // Providers is optional and can be used by restricting seeds by their provider type. '*' can be used to enable seeds regardless of their provider type. + // +optional + ProviderTypes []string `json:"providerTypes,omitempty" protobuf:"bytes,2,rep,name=providerTypes"` +} + +// KubernetesSettings contains constraints regarding allowed values of the 'kubernetes' block in the Shoot specification. +type KubernetesSettings struct { + // Versions is the list of allowed Kubernetes versions with optional expiration dates for Shoot clusters. + // +patchMergeKey=version + // +patchStrategy=merge + // +optional + Versions []ExpirableVersion `json:"versions,omitempty" patchStrategy:"merge" patchMergeKey:"version" protobuf:"bytes,1,rep,name=versions"` +} + +// MachineImage defines the name and multiple versions of the machine image in any environment. +type MachineImage struct { + // Name is the name of the image. 
+ Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // Versions contains versions, expiration dates and container runtimes of the machine image + // +patchMergeKey=version + // +patchStrategy=merge + Versions []MachineImageVersion `json:"versions" patchStrategy:"merge" patchMergeKey:"version" protobuf:"bytes,2,rep,name=versions"` +} + +// MachineImageVersion is an expirable version with list of supported container runtimes and interfaces +type MachineImageVersion struct { + ExpirableVersion `json:",inline" protobuf:"bytes,1,opt,name=expirableVersion"` + // CRI list of supported container runtime and interfaces supported by this version + // +optional + CRI []CRI `json:"cri,omitempty" protobuf:"bytes,2,rep,name=cri"` +} + +// ExpirableVersion contains a version and an expiration date. +type ExpirableVersion struct { + // Version is the version identifier. + Version string `json:"version" protobuf:"bytes,1,opt,name=version"` + // ExpirationDate defines the time at which this version expires. + // +optional + ExpirationDate *metav1.Time `json:"expirationDate,omitempty" protobuf:"bytes,2,opt,name=expirationDate"` + // Classification defines the state of a version (preview, supported, deprecated) + // +optional + Classification *VersionClassification `json:"classification,omitempty" protobuf:"bytes,3,opt,name=classification,casttype=VersionClassification"` +} + +// MachineType contains certain properties of a machine type. +type MachineType struct { + // CPU is the number of CPUs for this machine type. + CPU resource.Quantity `json:"cpu" protobuf:"bytes,1,opt,name=cpu"` + // GPU is the number of GPUs for this machine type. + GPU resource.Quantity `json:"gpu" protobuf:"bytes,2,opt,name=gpu"` + // Memory is the amount of memory for this machine type. + Memory resource.Quantity `json:"memory" protobuf:"bytes,3,opt,name=memory"` + // Name is the name of the machine type. + Name string `json:"name" protobuf:"bytes,4,opt,name=name"` + // Storage is the amount of storage associated with the root volume of this machine type. + // +optional + Storage *MachineTypeStorage `json:"storage,omitempty" protobuf:"bytes,5,opt,name=storage"` + // Usable defines if the machine type can be used for shoot clusters. + // +optional + Usable *bool `json:"usable,omitempty" protobuf:"varint,6,opt,name=usable"` +} + +// MachineTypeStorage is the amount of storage associated with the root volume of this machine type. +type MachineTypeStorage struct { + // Class is the class of the storage type. + Class string `json:"class" protobuf:"bytes,1,opt,name=class"` + // StorageSize is the storage size. + StorageSize resource.Quantity `json:"size" protobuf:"bytes,2,opt,name=size"` + // Type is the type of the storage. + Type string `json:"type" protobuf:"bytes,3,opt,name=type"` +} + +// Region contains certain properties of a region. +type Region struct { + // Name is a region name. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // Zones is a list of availability zones in this region. + // +patchMergeKey=name + // +patchStrategy=merge + // +optional + Zones []AvailabilityZone `json:"zones,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=zones"` + // Labels is an optional set of key-value pairs that contain certain administrator-controlled labels for this region. + // It can be used by Gardener administrators/operators to provide additional information about a region, e.g. wrt + // quality, reliability, access restrictions, etc. 
+ // +optional + Labels map[string]string `json:"labels,omitempty" protobuf:"bytes,3,rep,name=labels"` +} + +// AvailabilityZone is an availability zone. +type AvailabilityZone struct { + // Name is an availability zone name. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // UnavailableMachineTypes is a list of machine type names that are not available in this zone. + // +optional + UnavailableMachineTypes []string `json:"unavailableMachineTypes,omitempty" protobuf:"bytes,2,rep,name=unavailableMachineTypes"` + // UnavailableVolumeTypes is a list of volume type names that are not available in this zone. + // +optional + UnavailableVolumeTypes []string `json:"unavailableVolumeTypes,omitempty" protobuf:"bytes,3,rep,name=unavailableVolumeTypes"` +} + +// VolumeType contains certain properties of a volume type. +type VolumeType struct { + // Class is the class of the volume type. + Class string `json:"class" protobuf:"bytes,1,opt,name=class"` + // Name is the name of the volume type. + Name string `json:"name" protobuf:"bytes,2,opt,name=name"` + // Usable defines if the volume type can be used for shoot clusters. + // +optional + Usable *bool `json:"usable,omitempty" protobuf:"varint,3,opt,name=usable"` +} + +const ( + // VolumeClassStandard is a constant for the standard volume class. + VolumeClassStandard string = "standard" + // VolumeClassPremium is a constant for the premium volume class. + VolumeClassPremium string = "premium" +) + +// VersionClassification is the logical state of a version according to https://github.com/gardener/gardener/blob/master/docs/operations/versioning.md +type VersionClassification string + +const ( + // ClassificationPreview indicates that a version has recently been added and not promoted to "Supported" yet. + // ClassificationPreview versions will not be considered for automatic Kubernetes and Machine Image patch version updates. + ClassificationPreview VersionClassification = "preview" + // ClassificationSupported indicates that a patch version is the recommended version for a shoot. + // Using VersionMaintenance (see: https://github.com/gardener/gardener/docs/operation/versioning.md) there is one supported version per maintained minor version. + // Supported versions are eligible for the automated Kubernetes and Machine image patch version update for shoot clusters in Gardener. + ClassificationSupported VersionClassification = "supported" + // ClassificationDeprecated indicates that a patch version should not be used anymore, should be updated to a new version + // and will eventually expire. + ClassificationDeprecated VersionClassification = "deprecated" +) diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_common.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_common.go new file mode 100644 index 000000000..17bd41c37 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_common.go @@ -0,0 +1,141 @@ +// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// ErrorCode is a string alias. +type ErrorCode string + +const ( + // ErrorInfraUnauthorized indicates that the last error occurred due to invalid infrastructure credentials. + ErrorInfraUnauthorized ErrorCode = "ERR_INFRA_UNAUTHORIZED" + // ErrorInfraInsufficientPrivileges indicates that the last error occurred due to insufficient infrastructure privileges. + ErrorInfraInsufficientPrivileges ErrorCode = "ERR_INFRA_INSUFFICIENT_PRIVILEGES" + // ErrorInfraQuotaExceeded indicates that the last error occurred due to infrastructure quota limits. + ErrorInfraQuotaExceeded ErrorCode = "ERR_INFRA_QUOTA_EXCEEDED" + // ErrorInfraDependencies indicates that the last error occurred due to dependent objects on the infrastructure level. + ErrorInfraDependencies ErrorCode = "ERR_INFRA_DEPENDENCIES" + // ErrorInfraResourcesDepleted indicates that the last error occurred due to depleted resource in the infrastructure. + ErrorInfraResourcesDepleted ErrorCode = "ERR_INFRA_RESOURCES_DEPLETED" + // ErrorCleanupClusterResources indicates that the last error occurred due to resources in the cluster that are stuck in deletion. + ErrorCleanupClusterResources ErrorCode = "ERR_CLEANUP_CLUSTER_RESOURCES" + // ErrorConfigurationProblem indicates that the last error occurred due to a configuration problem. + ErrorConfigurationProblem ErrorCode = "ERR_CONFIGURATION_PROBLEM" +) + +// LastError indicates the last occurred error for an operation on a resource. +type LastError struct { + // A human readable message indicating details about the last error. + Description string `json:"description" protobuf:"bytes,1,opt,name=description"` + // ID of the task which caused this last error + // +optional + TaskID *string `json:"taskID,omitempty" protobuf:"bytes,2,opt,name=taskID"` + // Well-defined error codes of the last error(s). + // +optional + Codes []ErrorCode `json:"codes,omitempty" protobuf:"bytes,3,rep,name=codes,casttype=ErrorCode"` + // Last time the error was reported + // +optional + LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty" protobuf:"bytes,4,opt,name=lastUpdateTime"` +} + +// LastOperationType is a string alias. +type LastOperationType string + +const ( + // LastOperationTypeCreate indicates a 'create' operation. + LastOperationTypeCreate LastOperationType = "Create" + // LastOperationTypeReconcile indicates a 'reconcile' operation. + LastOperationTypeReconcile LastOperationType = "Reconcile" + // LastOperationTypeDelete indicates a 'delete' operation. + LastOperationTypeDelete LastOperationType = "Delete" + // LastOperationTypeMigrate indicates a 'migrate' operation. + LastOperationTypeMigrate LastOperationType = "Migrate" + // LastOperationTypeRestore indicates a 'restore' operation. + LastOperationTypeRestore LastOperationType = "Restore" +) + +// LastOperationState is a string alias. +type LastOperationState string + +const ( + // LastOperationStateProcessing indicates that an operation is ongoing. 
+ LastOperationStateProcessing LastOperationState = "Processing" + // LastOperationStateSucceeded indicates that an operation has completed successfully. + LastOperationStateSucceeded LastOperationState = "Succeeded" + // LastOperationStateError indicates that an operation is completed with errors and will be retried. + LastOperationStateError LastOperationState = "Error" + // LastOperationStateFailed indicates that an operation is completed with errors and won't be retried. + LastOperationStateFailed LastOperationState = "Failed" + // LastOperationStatePending indicates that an operation cannot be done now, but will be tried in the future. + LastOperationStatePending LastOperationState = "Pending" + // LastOperationStateAborted indicates that an operation has been aborted. + LastOperationStateAborted LastOperationState = "Aborted" +) + +// LastOperation indicates the type and the state of the last operation, along with a description +// message and a progress indicator. +type LastOperation struct { + // A human readable message indicating details about the last operation. + Description string `json:"description" protobuf:"bytes,1,opt,name=description"` + // Last time the operation state transitioned from one to another. + LastUpdateTime metav1.Time `json:"lastUpdateTime" protobuf:"bytes,2,opt,name=lastUpdateTime"` + // The progress in percentage (0-100) of the last operation. + Progress int32 `json:"progress" protobuf:"varint,3,opt,name=progress"` + // Status of the last operation, one of Aborted, Processing, Succeeded, Error, Failed. + State LastOperationState `json:"state" protobuf:"bytes,4,opt,name=state,casttype=LastOperationState"` + // Type of the last operation, one of Create, Reconcile, Delete. + Type LastOperationType `json:"type" protobuf:"bytes,5,opt,name=type,casttype=LastOperationType"` +} + +// Gardener holds the information about the Gardener version that operated a resource. +type Gardener struct { + // ID is the Docker container id of the Gardener which last acted on a resource. + ID string `json:"id" protobuf:"bytes,1,opt,name=id"` + // Name is the hostname (pod name) of the Gardener which last acted on a resource. + Name string `json:"name" protobuf:"bytes,2,opt,name=name"` + // Version is the version of the Gardener which last acted on a resource. + Version string `json:"version" protobuf:"bytes,3,opt,name=version"` +} + +const ( + // GardenerName is the value in a Garden resource's `.metadata.finalizers[]` array on which the Gardener will react + // when performing a delete request on a resource. + GardenerName = "gardener" + // ExternalGardenerName is the value in a Kubernetes core resource's `.metadata.finalizers[]` array on which the + // Gardener will react when performing a delete request on a resource. + ExternalGardenerName = "gardener.cloud/gardener" +) + +const ( + // EventReconciling indicates that a Reconcile operation started. + EventReconciling = "Reconciling" + // EventReconciled indicates that a Reconcile operation was successful. + EventReconciled = "Reconciled" + // EventReconcileError indicates that a Reconcile operation failed. + EventReconcileError = "ReconcileError" + // EventDeleting indicates that a Delete operation started. + EventDeleting = "Deleting" + // EventDeleted indicates that a Delete operation was successful. + EventDeleted = "Deleted" + // EventDeleteError indicates that a Delete operation failed. + EventDeleteError = "DeleteError" + // EventPrepareMigration indicates that a Prepare Migration operation started.
+ EventPrepareMigration = "PrepareMigration" + // EventMigrationPrepared indicates that Migration preparation was successful. + EventMigrationPrepared = "MigrationPrepared" + // EventMigrationPreparationFailed indicates that Migration preparation failed. + EventMigrationPreparationFailed = "MigrationPreparationFailed" +) diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_controllerinstallation.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_controllerinstallation.go new file mode 100644 index 000000000..7dd4ed9b4 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_controllerinstallation.go @@ -0,0 +1,80 @@ +// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ControllerInstallation represents an installation request for an external controller. +type ControllerInstallation struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata. + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Spec contains the specification of this installation. + Spec ControllerInstallationSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + // Status contains the status of this installation. + Status ControllerInstallationStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ControllerInstallationList is a collection of ControllerInstallations. +type ControllerInstallationList struct { + metav1.TypeMeta `json:",inline"` + // Standard list object metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Items is the list of ControllerInstallations. + Items []ControllerInstallation `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// ControllerInstallationSpec is the specification of a ControllerInstallation. +type ControllerInstallationSpec struct { + // RegistrationRef is used to reference a ControllerRegistration resources. + RegistrationRef corev1.ObjectReference `json:"registrationRef" protobuf:"bytes,1,opt,name=registrationRef"` + // SeedRef is used to reference a Seed resources. + SeedRef corev1.ObjectReference `json:"seedRef" protobuf:"bytes,2,opt,name=seedRef"` +} + +// ControllerInstallationStatus is the status of a ControllerInstallation. +type ControllerInstallationStatus struct { + // Conditions represents the latest available observations of a ControllerInstallations's current state. 
+ // +patchMergeKey=type + // +patchStrategy=merge + // +optional + Conditions []Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` + // ProviderStatus contains type-specific status. + // +optional + ProviderStatus *runtime.RawExtension `json:"providerStatus,omitempty" protobuf:"bytes,2,opt,name=providerStatus"` +} + +const ( + // ControllerInstallationHealthy is a condition type for indicating whether the controller is healthy. + ControllerInstallationHealthy ConditionType = "Healthy" + // ControllerInstallationInstalled is a condition type for indicating whether the controller has been installed. + ControllerInstallationInstalled ConditionType = "Installed" + // ControllerInstallationValid is a condition type for indicating whether the installation request is valid. + ControllerInstallationValid ConditionType = "Valid" + // ControllerInstallationRequired is a condition type for indicating that the respective extension controller is + // still required on the seed cluster as corresponding extension resources still exist. + ControllerInstallationRequired ConditionType = "Required" +) diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_controllerregistration.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_controllerregistration.go new file mode 100644 index 000000000..2465795a3 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_controllerregistration.go @@ -0,0 +1,108 @@ +// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ControllerRegistration represents a registration of an external controller. +type ControllerRegistration struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata. + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Spec contains the specification of this registration. + Spec ControllerRegistrationSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ControllerRegistrationList is a collection of ControllerRegistrations. +type ControllerRegistrationList struct { + metav1.TypeMeta `json:",inline"` + // Standard list object metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Items is the list of ControllerRegistrations. + Items []ControllerRegistration `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// ControllerRegistrationSpec is the specification of a ControllerRegistration. 
+type ControllerRegistrationSpec struct { + // Resources is a list of combinations of kinds (DNSProvider, Infrastructure, Generic, ...) and their actual types + // (aws-route53, gcp, auditlog, ...). + // +optional + Resources []ControllerResource `json:"resources,omitempty" protobuf:"bytes,1,opt,name=resources"` + // Deployment contains information for how this controller is deployed. + // +optional + Deployment *ControllerDeployment `json:"deployment,omitempty" protobuf:"bytes,2,opt,name=deployment"` +} + +// ControllerResource is a combination of a kind (DNSProvider, Infrastructure, Generic, ...) and the actual type for this +// kind (aws-route53, gcp, auditlog, ...). +type ControllerResource struct { + // Kind is the resource kind, for example "OperatingSystemConfig". + Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` + // Type is the resource type, for example "coreos" or "ubuntu". + Type string `json:"type" protobuf:"bytes,2,opt,name=type"` + // GloballyEnabled determines if this ControllerResource is required by all Shoot clusters. + // +optional + GloballyEnabled *bool `json:"globallyEnabled,omitempty" protobuf:"varint,3,opt,name=globallyEnabled"` + // ReconcileTimeout defines how long Gardener should wait for the resource reconciliation. + // +optional + ReconcileTimeout *metav1.Duration `json:"reconcileTimeout,omitempty" protobuf:"bytes,4,opt,name=reconcileTimeout"` + // Primary determines if the controller backed by this ControllerRegistration is responsible for the extension + // resource's lifecycle. This field defaults to true. There must be exactly one primary controller for this kind/type + // combination. + // +optional + Primary *bool `json:"primary,omitempty" protobuf:"varint,5,opt,name=primary"` +} + +// ControllerDeployment contains information for how this controller is deployed. +type ControllerDeployment struct { + // Type is the deployment type. + Type string `json:"type" protobuf:"bytes,1,opt,name=type"` + // ProviderConfig contains type-specific configuration. + // +optional + ProviderConfig *runtime.RawExtension `json:"providerConfig,omitempty" protobuf:"bytes,2,opt,name=providerConfig"` + // Policy controls how the controller is deployed. It defaults to 'OnDemand'. + // +optional + Policy *ControllerDeploymentPolicy `json:"policy,omitempty" protobuf:"bytes,3,opt,name=policy"` + // SeedSelector contains an optional label selector for seeds. Only if the labels match then this controller will be + // considered for a deployment. + // An empty list means that all seeds are selected. + // +optional + SeedSelector *metav1.LabelSelector `json:"seedSelector,omitempty" protobuf:"bytes,4,opt,name=seedSelector"` +} + +// ControllerDeploymentPolicy is a string alias. +type ControllerDeploymentPolicy string + +const ( + // ControllerDeploymentPolicyOnDemand specifies that the controller shall be only deployed if required by another + // resource. If nothing requires it then the controller shall not be deployed. + ControllerDeploymentPolicyOnDemand ControllerDeploymentPolicy = "OnDemand" + // ControllerDeploymentPolicyAlways specifies that the controller shall be deployed always, independent of whether + // another resource requires it or the respective seed has shoots. + ControllerDeploymentPolicyAlways ControllerDeploymentPolicy = "Always" + // ControllerDeploymentPolicyAlwaysExceptNoShoots specifies that the controller shall be deployed always, independent of + // whether another resource requires it, but only when the respective seed has at least one shoot. 
+ ControllerDeploymentPolicyAlwaysExceptNoShoots ControllerDeploymentPolicy = "AlwaysExceptNoShoots" +) diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_plant.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_plant.go new file mode 100644 index 000000000..26d891e28 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_plant.go @@ -0,0 +1,112 @@ +// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type Plant struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Spec contains the specification of this Plant. + Spec PlantSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + // Status contains the status of this Plant. + Status PlantStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PlantList is a collection of Plants. +type PlantList struct { + metav1.TypeMeta `json:",inline"` + // Standard list object metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Items is the list of Plants. + Items []Plant `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +const ( + // PlantEveryNodeReady is a constant for a condition type indicating the node health. + PlantEveryNodeReady ConditionType = "EveryNodeReady" + // PlantAPIServerAvailable is a constant for a condition type indicating that the Plant cluster API server is available. + PlantAPIServerAvailable ConditionType = "APIServerAvailable" +) + +// PlantSpec is the specification of a Plant. +type PlantSpec struct { + // SecretRef is a reference to a Secret object containing the Kubeconfig of the external kubernetes + // clusters to be added to Gardener. + SecretRef corev1.LocalObjectReference `json:"secretRef" protobuf:"bytes,1,opt,name=secretRef"` + // Endpoints is the configuration plant endpoints + // +patchMergeKey=name + // +patchStrategy=merge + // +optional + Endpoints []Endpoint `json:"endpoints,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=endpoints"` +} + +// PlantStatus is the status of a Plant. +type PlantStatus struct { + // Conditions represents the latest available observations of a Plant's current state. 
+ // +patchMergeKey=type + // +patchStrategy=merge + // +optional + Conditions []Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` + // ObservedGeneration is the most recent generation observed for this Plant. It corresponds to the + // Plant's generation, which is updated on mutation by the API Server. + // +optional + ObservedGeneration *int64 `json:"observedGeneration,omitempty" protobuf:"varint,2,opt,name=observedGeneration"` + // ClusterInfo is additional computed information about the newly added cluster (Plant) + ClusterInfo *ClusterInfo `json:"clusterInfo,omitempty" protobuf:"bytes,3,opt,name=clusterInfo"` +} + +// Endpoint is an endpoint for monitoring, logging and other services around the plant. +type Endpoint struct { + // Name is the name of the endpoint + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // URL is the url of the endpoint + URL string `json:"url" protobuf:"bytes,2,opt,name=url"` + // Purpose is the purpose of the endpoint + Purpose string `json:"purpose" protobuf:"bytes,3,opt,name=purpose"` +} + +// ClusterInfo contains information about the Plant cluster +type ClusterInfo struct { + // Cloud describes the cloud information + Cloud CloudInfo `json:"cloud" protobuf:"bytes,1,opt,name=cloud"` + // Kubernetes describes kubernetes meta information (e.g., version) + Kubernetes KubernetesInfo `json:"kubernetes" protobuf:"bytes,2,opt,name=kubernetes"` +} + +// CloudInfo contains information about the cloud +type CloudInfo struct { + // Type is the cloud type + Type string `json:"type" protobuf:"bytes,1,opt,name=type"` + // Region is the cloud region + Region string `json:"region" protobuf:"bytes,2,opt,name=region"` +} + +// KubernetesInfo contains the version and configuration variables for the Plant cluster. +type KubernetesInfo struct { + // Version is the semantic Kubernetes version to use for the Plant cluster. + Version string `json:"version" protobuf:"bytes,1,opt,name=version"` +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_project.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_project.go new file mode 100644 index 000000000..b48ec447e --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_project.go @@ -0,0 +1,176 @@ +// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Project holds certain properties about a Gardener project. +type Project struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata. 
+ // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Spec defines the project properties. + // +optional + Spec ProjectSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + // Most recently observed status of the Project. + // +optional + Status ProjectStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ProjectList is a collection of Projects. +type ProjectList struct { + metav1.TypeMeta `json:",inline"` + // Standard list object metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Items is the list of Projects. + Items []Project `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// ProjectSpec is the specification of a Project. +type ProjectSpec struct { + // CreatedBy is a subject representing a user name, an email address, or any other identifier of a user + // who created the project. + // +optional + CreatedBy *rbacv1.Subject `json:"createdBy,omitempty" protobuf:"bytes,1,opt,name=createdBy"` + // Description is a human-readable description of what the project is used for. + // +optional + Description *string `json:"description,omitempty" protobuf:"bytes,2,opt,name=description"` + // Owner is a subject representing a user name, an email address, or any other identifier of a user owning + // the project. + // IMPORTANT: Be aware that this field will be removed in the `v1` version of this API in favor of the `owner` + // role. The only way to change the owner will be by moving the `owner` role. In this API version the only way + // to change the owner is to use this field. + // +optional + // TODO: Remove this field in favor of the `owner` role in `v1`. + Owner *rbacv1.Subject `json:"owner,omitempty" protobuf:"bytes,3,opt,name=owner"` + // Purpose is a human-readable explanation of the project's purpose. + // +optional + Purpose *string `json:"purpose,omitempty" protobuf:"bytes,4,opt,name=purpose"` + // Members is a list of subjects representing a user name, an email address, or any other identifier of a user, + // group, or service account that has a certain role. + // +optional + Members []ProjectMember `json:"members,omitempty" protobuf:"bytes,5,rep,name=members"` + // Namespace is the name of the namespace that has been created for the Project object. + // A nil value means that Gardener will determine the name of the namespace. + // +optional + Namespace *string `json:"namespace,omitempty" protobuf:"bytes,6,opt,name=namespace"` + // Tolerations contains the default tolerations and a whitelist for taints on seed clusters. + // +optional + Tolerations *ProjectTolerations `json:"tolerations,omitempty" protobuf:"bytes,7,opt,name=tolerations"` +} + +// ProjectStatus holds the most recently observed status of the project. +type ProjectStatus struct { + // ObservedGeneration is the most recent generation observed for this project. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` + // Phase is the current phase of the project. + Phase ProjectPhase `json:"phase,omitempty" protobuf:"bytes,2,opt,name=phase,casttype=ProjectPhase"` + // StaleSinceTimestamp contains the timestamp when the project was first discovered to be stale/unused. 
+ // +optional + StaleSinceTimestamp *metav1.Time `json:"staleSinceTimestamp,omitempty" protobuf:"bytes,3,opt,name=staleSinceTimestamp"` + // StaleAutoDeleteTimestamp contains the timestamp when the project will be garbage-collected/automatically deleted + // because it's stale/unused. + // +optional + StaleAutoDeleteTimestamp *metav1.Time `json:"staleAutoDeleteTimestamp,omitempty" protobuf:"bytes,4,opt,name=staleAutoDeleteTimestamp"` +} + +// ProjectMember is a member of a project. +type ProjectMember struct { + // Subject is representing a user name, an email address, or any other identifier of a user, group, or service + // account that has a certain role. + rbacv1.Subject `json:",inline" protobuf:"bytes,1,opt,name=subject"` + // Role represents the role of this member. + // IMPORTANT: Be aware that this field will be removed in the `v1` version of this API in favor of the `roles` + // list. + // TODO: Remove this field in favor of the `owner` role in `v1`. + Role string `json:"role" protobuf:"bytes,2,opt,name=role"` + // Roles represents the list of roles of this member. + // +optional + Roles []string `json:"roles,omitempty" protobuf:"bytes,3,rep,name=roles"` +} + +// ProjectTolerations contains the tolerations for taints on seed clusters. +type ProjectTolerations struct { + // Defaults contains a list of tolerations that are added to the shoots in this project by default. + // +patchMergeKey=key + // +patchStrategy=merge + // +optional + Defaults []Toleration `json:"defaults,omitempty" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,1,rep,name=defaults"` + // Whitelist contains a list of tolerations that are allowed to be added to the shoots in this project. Please note + // that this list may only be added by users having the `spec-tolerations-whitelist` verb for project resources. + // +patchMergeKey=key + // +patchStrategy=merge + // +optional + Whitelist []Toleration `json:"whitelist,omitempty" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,2,rep,name=whitelist"` +} + +// Toleration is a toleration for a seed taint. +type Toleration struct { + // Key is the toleration key to be applied to a project or shoot. + Key string `json:"key" protobuf:"bytes,1,opt,name=key"` + // Value is the toleration value corresponding to the toleration key. + // +optional + Value *string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"` +} + +const ( + // ProjectMemberAdmin is a const for a role that provides full admin access. + ProjectMemberAdmin = "admin" + // ProjectMemberOwner is a const for a role that provides full owner access. + ProjectMemberOwner = "owner" + // ProjectMemberViewer is a const for a role that provides limited permissions to only view some resources. + ProjectMemberViewer = "viewer" + // ProjectMemberUserAccessManager is a const for a role that provides permissions to manage human user(s) and group(s). + ProjectMemberUserAccessManager = "uam" + // ProjectMemberExtensionPrefix is a prefix for custom roles that are not known by Gardener. + ProjectMemberExtensionPrefix = "extension:" +) + +// ProjectPhase is a label for the condition of a project at the current time. +type ProjectPhase string + +const ( + // ProjectPending indicates that the project reconciliation is pending. + ProjectPending ProjectPhase = "Pending" + // ProjectReady indicates that the project reconciliation was successful. + ProjectReady ProjectPhase = "Ready" + // ProjectFailed indicates that the project reconciliation failed.
+ ProjectFailed ProjectPhase = "Failed" + // ProjectTerminating indicates that the project is in termination process. + ProjectTerminating ProjectPhase = "Terminating" + + // ProjectEventNamespaceReconcileFailed indicates that the namespace reconciliation has failed. + ProjectEventNamespaceReconcileFailed = "NamespaceReconcileFailed" + // ProjectEventNamespaceReconcileSuccessful indicates that the namespace reconciliation has succeeded. + ProjectEventNamespaceReconcileSuccessful = "NamespaceReconcileSuccessful" + // ProjectEventNamespaceNotEmpty indicates that the namespace cannot be released because it is not empty. + ProjectEventNamespaceNotEmpty = "NamespaceNotEmpty" + // ProjectEventNamespaceDeletionFailed indicates that the namespace deletion failed. + ProjectEventNamespaceDeletionFailed = "NamespaceDeletionFailed" + // ProjectEventNamespaceMarkedForDeletion indicates that the namespace has been successfully marked for deletion. + ProjectEventNamespaceMarkedForDeletion = "NamespaceMarkedForDeletion" +) diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_quota.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_quota.go new file mode 100644 index 000000000..07269b294 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_quota.go @@ -0,0 +1,56 @@ +// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type Quota struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Spec defines the Quota constraints. + // +optional + Spec QuotaSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// QuotaList is a collection of Quotas. +type QuotaList struct { + metav1.TypeMeta `json:",inline"` + // Standard list object metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Items is the list of Quotas. + Items []Quota `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// QuotaSpec is the specification of a Quota. +type QuotaSpec struct { + // ClusterLifetimeDays is the lifetime of a Shoot cluster in days before it will be terminated automatically. + // +optional + ClusterLifetimeDays *int32 `json:"clusterLifetimeDays,omitempty" protobuf:"varint,1,opt,name=clusterLifetimeDays"` + // Metrics is a list of resources which will be put under constraints. 
+ Metrics corev1.ResourceList `json:"metrics" protobuf:"bytes,2,rep,name=metrics,casttype=k8s.io/api/core/v1.ResourceList,castkey=k8s.io/api/core/v1.ResourceName"` + // Scope is the scope of the Quota object, either 'project' or 'secret'. + Scope corev1.ObjectReference `json:"scope" protobuf:"bytes,3,opt,name=scope"` +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_secretbinding.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_secretbinding.go new file mode 100644 index 000000000..a391b5cba --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_secretbinding.go @@ -0,0 +1,47 @@ +// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type SecretBinding struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // SecretRef is a reference to a secret object in the same or another namespace. + SecretRef corev1.SecretReference `json:"secretRef" protobuf:"bytes,2,opt,name=secretRef"` + // Quotas is a list of references to Quota objects in the same or another namespace. + // +optional + Quotas []corev1.ObjectReference `json:"quotas,omitempty" protobuf:"bytes,3,rep,name=quotas"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// SecretBindingList is a collection of SecretBindings. +type SecretBindingList struct { + metav1.TypeMeta `json:",inline"` + // Standard list object metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Items is the list of SecretBindings. + Items []SecretBinding `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_seed.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_seed.go new file mode 100644 index 000000000..25d8e7e1f --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_seed.go @@ -0,0 +1,307 @@ +// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Seed represents an installation request for an external controller. +type Seed struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata. + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Spec contains the specification of this installation. + Spec SeedSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + // Status contains the status of this installation. + Status SeedStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// SeedList is a collection of Seeds. +type SeedList struct { + metav1.TypeMeta `json:",inline"` + // Standard list object metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Items is the list of Seeds. + Items []Seed `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// SeedSpec is the specification of a Seed. +type SeedSpec struct { + // Backup holds the object store configuration for the backups of shoot (currently only etcd). + // If it is not specified, then there won't be any backups taken for shoots associated with this seed. + // If backup field is present in seed, then backups of the etcd from shoot control plane will be stored + // under the configured object store. + // +optional + Backup *SeedBackup `json:"backup,omitempty" protobuf:"bytes,1,opt,name=backup"` + // BlockCIDRs is a list of network addresses that should be blocked for shoot control plane components running + // in the seed cluster. + // +optional + BlockCIDRs []string `json:"blockCIDRs,omitempty" protobuf:"bytes,2,rep,name=blockCIDRs"` + // DNS contains DNS-relevant information about this seed cluster. + DNS SeedDNS `json:"dns" protobuf:"bytes,3,opt,name=dns"` + // Networks defines the pod, service and worker network of the Seed cluster. + Networks SeedNetworks `json:"networks" protobuf:"bytes,4,opt,name=networks"` + // Provider defines the provider type and region for this Seed cluster. + Provider SeedProvider `json:"provider" protobuf:"bytes,5,opt,name=provider"` + // SecretRef is a reference to a Secret object containing the Kubeconfig and the cloud provider credentials for + // the account the Seed cluster has been deployed to. + // +optional + SecretRef *corev1.SecretReference `json:"secretRef,omitempty" protobuf:"bytes,6,opt,name=secretRef"` + // Taints describes taints on the seed. + // +optional + Taints []SeedTaint `json:"taints,omitempty" protobuf:"bytes,7,rep,name=taints"` + // Volume contains settings for persistentvolumes created in the seed cluster. + // +optional + Volume *SeedVolume `json:"volume,omitempty" protobuf:"bytes,8,opt,name=volume"` + // Settings contains certain settings for this seed cluster. 
+ // +optional + Settings *SeedSettings `json:"settings,omitempty" protobuf:"bytes,9,opt,name=settings"` + // Ingress configures Ingress specific settings of the Seed cluster. + // +optional + Ingress *Ingress `json:"ingress,omitempty" protobuf:"bytes,10,opt,name=ingress"` +} + +// SeedStatus is the status of a Seed. +type SeedStatus struct { + // Conditions represents the latest available observations of a Seed's current state. + // +patchMergeKey=type + // +patchStrategy=merge + // +optional + Conditions []Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` + // Gardener holds information about the Gardener instance which last acted on the Seed. + // +optional + Gardener *Gardener `json:"gardener,omitempty" protobuf:"bytes,2,opt,name=gardener"` + // KubernetesVersion is the Kubernetes version of the seed cluster. + // +optional + KubernetesVersion *string `json:"kubernetesVersion,omitempty" protobuf:"bytes,3,opt,name=kubernetesVersion"` + // ObservedGeneration is the most recent generation observed for this Seed. It corresponds to the + // Seed's generation, which is updated on mutation by the API Server. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,4,opt,name=observedGeneration"` + // ClusterIdentity is the identity of the Seed cluster + // +optional + ClusterIdentity *string `json:"clusterIdentity,omitempty" protobuf:"bytes,5,opt,name=clusterIdentity"` +} + +// SeedBackup contains the object store configuration for backups for shoot (currently only etcd). +type SeedBackup struct { + // Provider is a provider name. + Provider string `json:"provider" protobuf:"bytes,1,opt,name=provider"` + // ProviderConfig is the configuration passed to BackupBucket resource. + // +optional + ProviderConfig *runtime.RawExtension `json:"providerConfig,omitempty" protobuf:"bytes,2,opt,name=providerConfig"` + // Region is a region name. + // +optional + Region *string `json:"region,omitempty" protobuf:"bytes,3,opt,name=region"` + // SecretRef is a reference to a Secret object containing the cloud provider credentials for + // the object store where backups should be stored. It should have enough privileges to manipulate + // the objects as well as buckets. + SecretRef corev1.SecretReference `json:"secretRef" protobuf:"bytes,4,opt,name=secretRef"` +} + +// SeedDNS contains DNS-relevant information about this seed cluster. +type SeedDNS struct { + // IngressDomain is the domain of the Seed cluster pointing to the ingress controller endpoint. It will be used + // to construct ingress URLs for system applications running in Shoot clusters. Once set this field is immutable. + // This will be removed in the next API version and replaced by spec.ingress.domain. + // +optional + IngressDomain *string `json:"ingressDomain,omitempty" protobuf:"bytes,1,opt,name=ingressDomain"` + // Provider configures a DNSProvider + // +optional + Provider *SeedDNSProvider `json:"provider,omitempty" protobuf:"bytes,2,opt,name=provider"` +} + +// SeedDNSProvider configures a DNSProvider +type SeedDNSProvider struct { + // Type describes the type of the dns-provider, for example `aws-route53` + Type string `json:"type" protobuf:"bytes,1,opt,name=type"` + // SecretRef is a reference to a Secret object containing cloud provider credentials used for registering external domains. 
+ SecretRef corev1.SecretReference `json:"secretRef" protobuf:"bytes,2,opt,name=secretRef"` + // Domains contains information about which domains shall be included/excluded for this provider. + // +optional + Domains *DNSIncludeExclude `json:"domains,omitempty" protobuf:"bytes,3,opt,name=domains"` + // Zones contains information about which hosted zones shall be included/excluded for this provider. + // +optional + Zones *DNSIncludeExclude `json:"zones,omitempty" protobuf:"bytes,4,opt,name=zones"` +} + +// Ingress configures the Ingress specific settings of the Seed cluster. +type Ingress struct { + // Domain specifies the IngressDomain of the Seed cluster pointing to the ingress controller endpoint. It will be used + // to construct ingress URLs for system applications running in Shoot clusters. Once set this field is immutable. + Domain string `json:"domain" protobuf:"bytes,1,opt,name=domain"` + // Controller configures a Gardener managed Ingress Controller listening on the ingressDomain + Controller IngressController `json:"controller" protobuf:"bytes,2,opt,name=controller"` +} + +// IngressController enables a Gardener managed Ingress Controller listening on the ingressDomain +type IngressController struct { + // Kind defines which kind of IngressController to use, for example `nginx` + Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` + // ProviderConfig specifies infrastructure specific configuration for the ingressController + // +optional + ProviderConfig *runtime.RawExtension `json:"providerConfig,omitempty" protobuf:"bytes,2,opt,name=providerConfig"` +} + +// SeedNetworks contains CIDRs for the pod, service and node networks of a Kubernetes cluster. +type SeedNetworks struct { + // Nodes is the CIDR of the node network. + // +optional + Nodes *string `json:"nodes,omitempty" protobuf:"bytes,1,opt,name=nodes"` + // Pods is the CIDR of the pod network. + Pods string `json:"pods" protobuf:"bytes,2,opt,name=pods"` + // Services is the CIDR of the service network. + Services string `json:"services" protobuf:"bytes,3,opt,name=services"` + // ShootDefaults contains the default networks CIDRs for shoots. + // +optional + ShootDefaults *ShootNetworks `json:"shootDefaults,omitempty" protobuf:"bytes,4,opt,name=shootDefaults"` +} + +// ShootNetworks contains the default networks CIDRs for shoots. +type ShootNetworks struct { + // Pods is the CIDR of the pod network. + // +optional + Pods *string `json:"pods,omitempty" protobuf:"bytes,1,opt,name=pods"` + // Services is the CIDR of the service network. + // +optional + Services *string `json:"services,omitempty" protobuf:"bytes,2,opt,name=services"` +} + +// SeedProvider defines the provider type and region for this Seed cluster. +type SeedProvider struct { + // Type is the name of the provider. + Type string `json:"type" protobuf:"bytes,1,opt,name=type"` + // ProviderConfig is the configuration passed to Seed resource. + // +optional + ProviderConfig *runtime.RawExtension `json:"providerConfig,omitempty" protobuf:"bytes,2,opt,name=providerConfig"` + // Region is a name of a region. + Region string `json:"region" protobuf:"bytes,3,opt,name=region"` +} + +// SeedSettings contains certain settings for this seed cluster. +type SeedSettings struct { + // ExcessCapacityReservation controls the excess capacity reservation for shoot control planes in the seed. 
+ // +optional + ExcessCapacityReservation *SeedSettingExcessCapacityReservation `json:"excessCapacityReservation,omitempty" protobuf:"bytes,1,opt,name=excessCapacityReservation"` + // Scheduling controls settings for scheduling decisions for the seed. + // +optional + Scheduling *SeedSettingScheduling `json:"scheduling,omitempty" protobuf:"bytes,2,opt,name=scheduling"` + // ShootDNS controls the shoot DNS settings for the seed. + // +optional + ShootDNS *SeedSettingShootDNS `json:"shootDNS,omitempty" protobuf:"bytes,3,opt,name=shootDNS"` + // LoadBalancerServices controls certain settings for services of type load balancer that are created in the + // seed. + // +optional + LoadBalancerServices *SeedSettingLoadBalancerServices `json:"loadBalancerServices,omitempty" protobuf:"bytes,4,opt,name=loadBalancerServices"` + // VerticalPodAutoscaler controls certain settings for the vertical pod autoscaler components deployed in the seed. + // +optional + VerticalPodAutoscaler *SeedSettingVerticalPodAutoscaler `json:"verticalPodAutoscaler,omitempty" protobuf:"bytes,5,opt,name=verticalPodAutoscaler"` +} + +// SeedSettingExcessCapacityReservation controls the excess capacity reservation for shoot control planes in the +// seed. When enabled then this is done via PodPriority and requires the Seed cluster to have Kubernetes version 1.11 +// or the PodPriority feature gate as well as the scheduling.k8s.io/v1alpha1 API group enabled. +type SeedSettingExcessCapacityReservation struct { + // Enabled controls whether the excess capacity reservation should be enabled. + Enabled bool `json:"enabled" protobuf:"bytes,1,opt,name=enabled"` +} + +// SeedSettingShootDNS controls the shoot DNS settings for the seed. +type SeedSettingShootDNS struct { + // Enabled controls whether the DNS for shoot clusters should be enabled. When disabled then all shoots using the + // seed won't get any DNS providers, DNS records, and no DNS extension controller is required to be installed here. + // This is useful for environments where DNS is not required. + Enabled bool `json:"enabled" protobuf:"bytes,1,opt,name=enabled"` +} + +// SeedSettingScheduling controls settings for scheduling decisions for the seed. +type SeedSettingScheduling struct { + // Visible controls whether the gardener-scheduler shall consider this seed when scheduling shoots. Invisible seeds + // are not considered by the scheduler. + Visible bool `json:"visible" protobuf:"bytes,1,opt,name=visible"` +} + +// SeedSettingLoadBalancerServices controls certain settings for services of type load balancer that are created in the +// seed. +type SeedSettingLoadBalancerServices struct { + // Annotations is a map of annotations that will be injected/merged into every load balancer service object. + // +optional + Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,1,rep,name=annotations"` +} + +// SeedSettingVerticalPodAutoscaler controls certain settings for the vertical pod autoscaler components deployed in the +// seed. +type SeedSettingVerticalPodAutoscaler struct { + // Enabled controls whether the VPA components shall be deployed into the garden namespace in the seed cluster. It + // is enabled by default because Gardener heavily relies on a VPA being deployed. You should only disable this if + // your seed cluster already has another, manually/custom managed VPA deployment. + Enabled bool `json:"enabled" protobuf:"bytes,1,opt,name=enabled"` +} + +// SeedTaint describes a taint on a seed. 
+type SeedTaint struct { + // Key is the taint key to be applied to a seed. + Key string `json:"key" protobuf:"bytes,1,opt,name=key"` + // Value is the taint value corresponding to the taint key. + // +optional + Value *string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"` +} + +const ( + // SeedTaintProtected is a constant for a taint key on a seed that marks it as protected. Protected seeds + // may only be used by shoots in the `garden` namespace. + SeedTaintProtected = "seed.gardener.cloud/protected" +) + +// SeedVolume contains settings for persistentvolumes created in the seed cluster. +type SeedVolume struct { + // MinimumSize defines the minimum size that should be used for PVCs in the seed. + // +optional + MinimumSize *resource.Quantity `json:"minimumSize,omitempty" protobuf:"bytes,1,opt,name=minimumSize"` + // Providers is a list of storage class provisioner types for the seed. + // +patchMergeKey=name + // +patchStrategy=merge + // +optional + Providers []SeedVolumeProvider `json:"providers,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=providers"` +} + +// SeedVolumeProvider is a storage class provisioner type. +type SeedVolumeProvider struct { + // Purpose is the purpose of this provider. + Purpose string `json:"purpose" protobuf:"bytes,1,opt,name=purpose"` + // Name is the name of the storage class provisioner type. + Name string `json:"name" protobuf:"bytes,2,opt,name=name"` +} + +const ( + // SeedBootstrapped is a constant for a condition type indicating that the seed cluster has been + // bootstrapped. + SeedBootstrapped ConditionType = "Bootstrapped" + // SeedExtensionsReady is a constant for a condition type indicating that the extensions are ready. + SeedExtensionsReady ConditionType = "ExtensionsReady" + // SeedGardenletReady is a constant for a condition type indicating that the Gardenlet is ready. + SeedGardenletReady ConditionType = "GardenletReady" +) diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_shoot.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_shoot.go new file mode 100644 index 000000000..59dbd5292 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_shoot.go @@ -0,0 +1,1181 @@ +// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + "time" + + autoscalingv1 "k8s.io/api/autoscaling/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type Shoot struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata. 
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ // Specification of the Shoot cluster.
+ // +optional
+ Spec ShootSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+ // Most recently observed status of the Shoot cluster.
+ // +optional
+ Status ShootStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ShootList is a list of Shoot objects.
+type ShootList struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard list object metadata.
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ // Items is the list of Shoots.
+ Items []Shoot `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// ShootSpec is the specification of a Shoot.
+type ShootSpec struct {
+ // Addons contains information about enabled/disabled addons and their configuration.
+ // +optional
+ Addons *Addons `json:"addons,omitempty" protobuf:"bytes,1,opt,name=addons"`
+ // CloudProfileName is a name of a CloudProfile object.
+ CloudProfileName string `json:"cloudProfileName" protobuf:"bytes,2,opt,name=cloudProfileName"`
+ // DNS contains information about the DNS settings of the Shoot.
+ // +optional
+ DNS *DNS `json:"dns,omitempty" protobuf:"bytes,3,opt,name=dns"`
+ // Extensions contain type and provider information for Shoot extensions.
+ // +optional
+ Extensions []Extension `json:"extensions,omitempty" protobuf:"bytes,4,rep,name=extensions"`
+ // Hibernation contains information whether the Shoot is suspended or not.
+ // +optional
+ Hibernation *Hibernation `json:"hibernation,omitempty" protobuf:"bytes,5,opt,name=hibernation"`
+ // Kubernetes contains the version and configuration settings of the control plane components.
+ Kubernetes Kubernetes `json:"kubernetes" protobuf:"bytes,6,opt,name=kubernetes"`
+ // Networking contains information about cluster networking such as CNI plugin type, CIDRs, etc.
+ Networking Networking `json:"networking" protobuf:"bytes,7,opt,name=networking"`
+ // Maintenance contains information about the time window for maintenance operations and which
+ // operations should be performed.
+ // +optional
+ Maintenance *Maintenance `json:"maintenance,omitempty" protobuf:"bytes,8,opt,name=maintenance"`
+ // Monitoring contains information about custom monitoring configurations for the shoot.
+ // +optional
+ Monitoring *Monitoring `json:"monitoring,omitempty" protobuf:"bytes,9,opt,name=monitoring"`
+ // Provider contains all provider-specific and provider-relevant information.
+ Provider Provider `json:"provider" protobuf:"bytes,10,opt,name=provider"`
+ // Purpose is the purpose class for this cluster.
+ // +optional
+ Purpose *ShootPurpose `json:"purpose,omitempty" protobuf:"bytes,11,opt,name=purpose"`
+ // Region is a name of a region.
+ Region string `json:"region" protobuf:"bytes,12,opt,name=region"`
+ // SecretBindingName is the name of a SecretBinding that has a reference to the provider secret.
+ // The credentials inside the provider secret will be used to create the shoot in the respective account.
+ SecretBindingName string `json:"secretBindingName" protobuf:"bytes,13,opt,name=secretBindingName"`
+ // SeedName is the name of the seed cluster that runs the control plane of the Shoot.
+ // +optional
+ SeedName *string `json:"seedName,omitempty" protobuf:"bytes,14,opt,name=seedName"`
+ // SeedSelector is an optional selector which must match a seed's labels for the shoot to be scheduled on that seed.
+ // +optional
+ SeedSelector *SeedSelector `json:"seedSelector,omitempty" protobuf:"bytes,15,opt,name=seedSelector"`
+ // Resources holds a list of named resource references that can be referred to in extension configs by their names.
+ // +optional
+ Resources []NamedResourceReference `json:"resources,omitempty" protobuf:"bytes,16,rep,name=resources"`
+ // Tolerations contains the tolerations for taints on seed clusters.
+ // +patchMergeKey=key
+ // +patchStrategy=merge
+ // +optional
+ Tolerations []Toleration `json:"tolerations,omitempty" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,17,rep,name=tolerations"`
+}
+
+// ShootStatus holds the most recently observed status of the Shoot cluster.
+type ShootStatus struct {
+ // Conditions represents the latest available observations of a Shoot's current state.
+ // +optional
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ Conditions []Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
+ // Constraints represents conditions of a Shoot's current state that constrain some operations on it.
+ // +optional
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ Constraints []Condition `json:"constraints,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=constraints"`
+ // Gardener holds information about the Gardener which last acted on the Shoot.
+ Gardener Gardener `json:"gardener" protobuf:"bytes,3,opt,name=gardener"`
+ // IsHibernated indicates whether the Shoot is currently hibernated.
+ IsHibernated bool `json:"hibernated" protobuf:"varint,4,opt,name=hibernated"`
+ // LastOperation holds information about the last operation on the Shoot.
+ // +optional
+ LastOperation *LastOperation `json:"lastOperation,omitempty" protobuf:"bytes,5,opt,name=lastOperation"`
+ // LastError holds information about the last occurred error during an operation.
+ // +optional
+ LastError *LastError `json:"lastError,omitempty" protobuf:"bytes,6,opt,name=lastError"`
+ // LastErrors holds information about the last occurred error(s) during an operation.
+ // +optional
+ LastErrors []LastError `json:"lastErrors,omitempty" protobuf:"bytes,7,rep,name=lastErrors"`
+ // ObservedGeneration is the most recent generation observed for this Shoot. It corresponds to the
+ // Shoot's generation, which is updated on mutation by the API Server.
+ // +optional
+ ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,8,opt,name=observedGeneration"`
+ // RetryCycleStartTime is the start time of the last retry cycle (used to determine how often an operation
+ // must be retried until we give up).
+ // +optional
+ RetryCycleStartTime *metav1.Time `json:"retryCycleStartTime,omitempty" protobuf:"bytes,9,opt,name=retryCycleStartTime"`
+ // Seed is the name of the seed cluster that runs the control plane of the Shoot. This value is only written
+ // after a successful create/reconcile operation. It will be used when control planes are moved between Seeds.
+ // +optional
+ Seed *string `json:"seed,omitempty" protobuf:"bytes,10,opt,name=seed"`
+ // TechnicalID is the name that is used for creating the Seed namespace, the infrastructure resources, and
+ // basically everything that is related to this particular Shoot.
+ TechnicalID string `json:"technicalID" protobuf:"bytes,11,opt,name=technicalID"` + // UID is a unique identifier for the Shoot cluster to avoid portability between Kubernetes clusters. + // It is used to compute unique hashes. + UID types.UID `json:"uid" protobuf:"bytes,12,opt,name=uid"` + // ClusterIdentity is the identity of the Shoot cluster + // +optional + ClusterIdentity *string `json:"clusterIdentity,omitempty" protobuf:"bytes,13,opt,name=clusterIdentity"` +} + +////////////////////////////////////////////////////////////////////////////////////////////////// +// Addons relevant types // +////////////////////////////////////////////////////////////////////////////////////////////////// + +// Addons is a collection of configuration for specific addons which are managed by the Gardener. +type Addons struct { + // KubernetesDashboard holds configuration settings for the kubernetes dashboard addon. + // +optional + KubernetesDashboard *KubernetesDashboard `json:"kubernetes-dashboard,omitempty" protobuf:"bytes,1,opt,name=kubernetesDashboard"` + // NginxIngress holds configuration settings for the nginx-ingress addon. + // +optional + NginxIngress *NginxIngress `json:"nginx-ingress,omitempty" protobuf:"bytes,2,opt,name=nginxIngress"` +} + +// Addon allows enabling or disabling a specific addon and is used to derive from. +type Addon struct { + // Enabled indicates whether the addon is enabled or not. + Enabled bool `json:"enabled" protobuf:"varint,1,opt,name=enabled"` +} + +// KubernetesDashboard describes configuration values for the kubernetes-dashboard addon. +type KubernetesDashboard struct { + Addon `json:",inline" protobuf:"bytes,2,opt,name=addon"` + // AuthenticationMode defines the authentication mode for the kubernetes-dashboard. + // +optional + AuthenticationMode *string `json:"authenticationMode,omitempty" protobuf:"bytes,1,opt,name=authenticationMode"` +} + +const ( + // KubernetesDashboardAuthModeBasic uses basic authentication mode for auth. + KubernetesDashboardAuthModeBasic = "basic" + // KubernetesDashboardAuthModeToken uses token-based mode for auth. + KubernetesDashboardAuthModeToken = "token" +) + +// NginxIngress describes configuration values for the nginx-ingress addon. +type NginxIngress struct { + Addon `json:",inline" protobuf:"bytes,4,opt,name=addon"` + // LoadBalancerSourceRanges is list of allowed IP sources for NginxIngress + // +optional + LoadBalancerSourceRanges []string `json:"loadBalancerSourceRanges,omitempty" protobuf:"bytes,1,rep,name=loadBalancerSourceRanges"` + // Config contains custom configuration for the nginx-ingress-controller configuration. + // See https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/configmap.md#configuration-options + // +optional + Config map[string]string `json:"config,omitempty" protobuf:"bytes,2,rep,name=config"` + // ExternalTrafficPolicy controls the `.spec.externalTrafficPolicy` value of the load balancer `Service` + // exposing the nginx-ingress. Defaults to `Cluster`. 
+ // +optional + ExternalTrafficPolicy *corev1.ServiceExternalTrafficPolicyType `json:"externalTrafficPolicy,omitempty" protobuf:"bytes,3,opt,name=externalTrafficPolicy,casttype=k8s.io/api/core/v1.ServiceExternalTrafficPolicyType"` +} + +////////////////////////////////////////////////////////////////////////////////////////////////// +// DNS relevant types // +////////////////////////////////////////////////////////////////////////////////////////////////// + +// DNS holds information about the provider, the hosted zone id and the domain. +type DNS struct { + // Domain is the external available domain of the Shoot cluster. This domain will be written into the + // kubeconfig that is handed out to end-users. Once set it is immutable. + // +optional + Domain *string `json:"domain,omitempty" protobuf:"bytes,1,opt,name=domain"` + // Providers is a list of DNS providers that shall be enabled for this shoot cluster. Only relevant if + // not a default domain is used. + // +patchMergeKey=type + // +patchStrategy=merge + // +optional + Providers []DNSProvider `json:"providers,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=providers"` +} + +// DNSProvider contains information about a DNS provider. +type DNSProvider struct { + // Domains contains information about which domains shall be included/excluded for this provider. + // +optional + Domains *DNSIncludeExclude `json:"domains,omitempty" protobuf:"bytes,1,opt,name=domains"` + // Primary indicates that this DNSProvider is used for shoot related domains. + // +optional + Primary *bool `json:"primary,omitempty" protobuf:"varint,2,opt,name=primary"` + // SecretName is a name of a secret containing credentials for the stated domain and the + // provider. When not specified, the Gardener will use the cloud provider credentials referenced + // by the Shoot and try to find respective credentials there (primary provider only). Specifying this field may override + // this behavior, i.e. forcing the Gardener to only look into the given secret. + // +optional + SecretName *string `json:"secretName,omitempty" protobuf:"bytes,3,opt,name=secretName"` + // Type is the DNS provider type. + // +optional + Type *string `json:"type,omitempty" protobuf:"bytes,4,opt,name=type"` + // Zones contains information about which hosted zones shall be included/excluded for this provider. + // +optional + Zones *DNSIncludeExclude `json:"zones,omitempty" protobuf:"bytes,5,opt,name=zones"` +} + +type DNSIncludeExclude struct { + // Include is a list of resources that shall be included. + // +optional + Include []string `json:"include,omitempty" protobuf:"bytes,1,rep,name=include"` + // Exclude is a list of resources that shall be excluded. + // +optional + Exclude []string `json:"exclude,omitempty" protobuf:"bytes,2,rep,name=exclude"` +} + +// DefaultDomain is the default value in the Shoot's '.spec.dns.domain' when '.spec.dns.provider' is 'unmanaged' +const DefaultDomain = "cluster.local" + +////////////////////////////////////////////////////////////////////////////////////////////////// +// Extension relevant types // +////////////////////////////////////////////////////////////////////////////////////////////////// + +// Extension contains type and provider information for Shoot extensions. +type Extension struct { + // Type is the type of the extension resource. + Type string `json:"type" protobuf:"bytes,1,opt,name=type"` + // ProviderConfig is the configuration passed to extension resource. 
+ // +optional + ProviderConfig *runtime.RawExtension `json:"providerConfig,omitempty" protobuf:"bytes,2,opt,name=providerConfig"` + // Disabled allows to disable extensions that were marked as 'globally enabled' by Gardener administrators. + // +optional + Disabled *bool `json:"disabled,omitempty" protobuf:"varint,3,opt,name=disabled"` +} + +////////////////////////////////////////////////////////////////////////////////////////////////// +// NamedResourceReference relevant types // +////////////////////////////////////////////////////////////////////////////////////////////////// + +// NamedResourceReference is a named reference to a resource. +type NamedResourceReference struct { + // Name of the resource reference. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // ResourceRef is a reference to a resource. + ResourceRef autoscalingv1.CrossVersionObjectReference `json:"resourceRef" protobuf:"bytes,2,opt,name=resourceRef"` +} + +////////////////////////////////////////////////////////////////////////////////////////////////// +// Hibernation relevant types // +////////////////////////////////////////////////////////////////////////////////////////////////// + +// Hibernation contains information whether the Shoot is suspended or not. +type Hibernation struct { + // Enabled specifies whether the Shoot needs to be hibernated or not. If it is true, the Shoot's desired state is to be hibernated. + // If it is false or nil, the Shoot's desired state is to be awaken. + // +optional + Enabled *bool `json:"enabled,omitempty" protobuf:"varint,1,opt,name=enabled"` + // Schedules determine the hibernation schedules. + // +optional + Schedules []HibernationSchedule `json:"schedules,omitempty" protobuf:"bytes,2,rep,name=schedules"` +} + +// HibernationSchedule determines the hibernation schedule of a Shoot. +// A Shoot will be regularly hibernated at each start time and will be woken up at each end time. +// Start or End can be omitted, though at least one of each has to be specified. +type HibernationSchedule struct { + // Start is a Cron spec at which time a Shoot will be hibernated. + // +optional + Start *string `json:"start,omitempty" protobuf:"bytes,1,opt,name=start"` + // End is a Cron spec at which time a Shoot will be woken up. + // +optional + End *string `json:"end,omitempty" protobuf:"bytes,2,opt,name=end"` + // Location is the time location in which both start and and shall be evaluated. + // +optional + Location *string `json:"location,omitempty" protobuf:"bytes,3,opt,name=location"` +} + +////////////////////////////////////////////////////////////////////////////////////////////////// +// Kubernetes relevant types // +////////////////////////////////////////////////////////////////////////////////////////////////// + +// Kubernetes contains the version and configuration variables for the Shoot control plane. +type Kubernetes struct { + // AllowPrivilegedContainers indicates whether privileged containers are allowed in the Shoot (default: true). + // +optional + AllowPrivilegedContainers *bool `json:"allowPrivilegedContainers,omitempty" protobuf:"varint,1,opt,name=allowPrivilegedContainers"` + // ClusterAutoscaler contains the configuration flags for the Kubernetes cluster autoscaler. + // +optional + ClusterAutoscaler *ClusterAutoscaler `json:"clusterAutoscaler,omitempty" protobuf:"bytes,2,opt,name=clusterAutoscaler"` + // KubeAPIServer contains configuration settings for the kube-apiserver. 
+ // +optional + KubeAPIServer *KubeAPIServerConfig `json:"kubeAPIServer,omitempty" protobuf:"bytes,3,opt,name=kubeAPIServer"` + // KubeControllerManager contains configuration settings for the kube-controller-manager. + // +optional + KubeControllerManager *KubeControllerManagerConfig `json:"kubeControllerManager,omitempty" protobuf:"bytes,4,opt,name=kubeControllerManager"` + // KubeScheduler contains configuration settings for the kube-scheduler. + // +optional + KubeScheduler *KubeSchedulerConfig `json:"kubeScheduler,omitempty" protobuf:"bytes,5,opt,name=kubeScheduler"` + // KubeProxy contains configuration settings for the kube-proxy. + // +optional + KubeProxy *KubeProxyConfig `json:"kubeProxy,omitempty" protobuf:"bytes,6,opt,name=kubeProxy"` + // Kubelet contains configuration settings for the kubelet. + // +optional + Kubelet *KubeletConfig `json:"kubelet,omitempty" protobuf:"bytes,7,opt,name=kubelet"` + // Version is the semantic Kubernetes version to use for the Shoot cluster. + Version string `json:"version" protobuf:"bytes,8,opt,name=version"` + // VerticalPodAutoscaler contains the configuration flags for the Kubernetes vertical pod autoscaler. + // +optional + VerticalPodAutoscaler *VerticalPodAutoscaler `json:"verticalPodAutoscaler,omitempty" protobuf:"bytes,9,opt,name=verticalPodAutoscaler"` +} + +// ClusterAutoscaler contains the configuration flags for the Kubernetes cluster autoscaler. +type ClusterAutoscaler struct { + // ScaleDownDelayAfterAdd defines how long after scale up that scale down evaluation resumes (default: 1 hour). + // +optional + ScaleDownDelayAfterAdd *metav1.Duration `json:"scaleDownDelayAfterAdd,omitempty" protobuf:"bytes,1,opt,name=scaleDownDelayAfterAdd"` + // ScaleDownDelayAfterDelete how long after node deletion that scale down evaluation resumes, defaults to scanInterval (defaults to ScanInterval). + // +optional + ScaleDownDelayAfterDelete *metav1.Duration `json:"scaleDownDelayAfterDelete,omitempty" protobuf:"bytes,2,opt,name=scaleDownDelayAfterDelete"` + // ScaleDownDelayAfterFailure how long after scale down failure that scale down evaluation resumes (default: 3 mins). + // +optional + ScaleDownDelayAfterFailure *metav1.Duration `json:"scaleDownDelayAfterFailure,omitempty" protobuf:"bytes,3,opt,name=scaleDownDelayAfterFailure"` + // ScaleDownUnneededTime defines how long a node should be unneeded before it is eligible for scale down (default: 30 mins). + // +optional + ScaleDownUnneededTime *metav1.Duration `json:"scaleDownUnneededTime,omitempty" protobuf:"bytes,4,opt,name=scaleDownUnneededTime"` + // ScaleDownUtilizationThreshold defines the threshold in % under which a node is being removed + // +optional + ScaleDownUtilizationThreshold *float64 `json:"scaleDownUtilizationThreshold,omitempty" protobuf:"fixed64,5,opt,name=scaleDownUtilizationThreshold"` + // ScanInterval how often cluster is reevaluated for scale up or down (default: 10 secs). + // +optional + ScanInterval *metav1.Duration `json:"scanInterval,omitempty" protobuf:"bytes,6,opt,name=scanInterval"` +} + +// VerticalPodAutoscaler contains the configuration flags for the Kubernetes vertical pod autoscaler. +type VerticalPodAutoscaler struct { + // Enabled specifies whether the Kubernetes VPA shall be enabled for the shoot cluster. 
+ Enabled bool `json:"enabled" protobuf:"varint,1,opt,name=enabled"` + // EvictAfterOOMThreshold defines the threshold that will lead to pod eviction in case it OOMed in less than the given + // threshold since its start and if it has only one container (default: 10m0s). + // +optional + EvictAfterOOMThreshold *metav1.Duration `json:"evictAfterOOMThreshold,omitempty" protobuf:"bytes,2,opt,name=evictAfterOOMThreshold"` + // EvictionRateBurst defines the burst of pods that can be evicted (default: 1) + // +optional + EvictionRateBurst *int32 `json:"evictionRateBurst,omitempty" protobuf:"varint,3,opt,name=evictionRateBurst"` + // EvictionRateLimit defines the number of pods that can be evicted per second. A rate limit set to 0 or -1 will + // disable the rate limiter (default: -1). + // +optional + EvictionRateLimit *float64 `json:"evictionRateLimit,omitempty" protobuf:"fixed64,4,opt,name=evictionRateLimit"` + // EvictionTolerance defines the fraction of replica count that can be evicted for update in case more than one + // pod can be evicted (default: 0.5). + // +optional + EvictionTolerance *float64 `json:"evictionTolerance,omitempty" protobuf:"fixed64,5,opt,name=evictionTolerance"` + // RecommendationMarginFraction is the fraction of usage added as the safety margin to the recommended request + // (default: 0.15). + // +optional + RecommendationMarginFraction *float64 `json:"recommendationMarginFraction,omitempty" protobuf:"fixed64,6,opt,name=recommendationMarginFraction"` + // UpdaterInterval is the interval how often the updater should run (default: 1m0s). + // +optional + UpdaterInterval *metav1.Duration `json:"updaterInterval,omitempty" protobuf:"bytes,7,opt,name=updaterInterval"` + // RecommenderInterval is the interval how often metrics should be fetched (default: 1m0s). + // +optional + RecommenderInterval *metav1.Duration `json:"recommenderInterval,omitempty" protobuf:"bytes,8,opt,name=recommenderInterval"` +} + +const ( + // DefaultEvictionRateBurst is the default value for the EvictionRateBurst field in the VPA configuration. + DefaultEvictionRateBurst int32 = 1 + // DefaultEvictionRateLimit is the default value for the EvictionRateLimit field in the VPA configuration. + DefaultEvictionRateLimit float64 = -1 + // DefaultEvictionTolerance is the default value for the EvictionTolerance field in the VPA configuration. + DefaultEvictionTolerance = 0.5 + // DefaultRecommendationMarginFraction is the default value for the RecommendationMarginFraction field in the VPA configuration. + DefaultRecommendationMarginFraction = 0.15 +) + +var ( + // DefaultEvictAfterOOMThreshold is the default value for the EvictAfterOOMThreshold field in the VPA configuration. + DefaultEvictAfterOOMThreshold = metav1.Duration{Duration: 10 * time.Minute} + // DefaultUpdaterInterval is the default value for the UpdaterInterval field in the VPA configuration. + DefaultUpdaterInterval = metav1.Duration{Duration: time.Minute} + // DefaultRecommenderInterval is the default value for the RecommenderInterval field in the VPA configuration. + DefaultRecommenderInterval = metav1.Duration{Duration: time.Minute} +) + +// KubernetesConfig contains common configuration fields for the control plane components. +type KubernetesConfig struct { + // FeatureGates contains information about enabled feature gates. + // +optional + FeatureGates map[string]bool `json:"featureGates,omitempty" protobuf:"bytes,1,rep,name=featureGates"` +} + +// KubeAPIServerConfig contains configuration settings for the kube-apiserver. 
+type KubeAPIServerConfig struct { + KubernetesConfig `json:",inline" protobuf:"bytes,1,opt,name=kubernetesConfig"` + // AdmissionPlugins contains the list of user-defined admission plugins (additional to those managed by Gardener), and, if desired, the corresponding + // configuration. + // +patchMergeKey=name + // +patchStrategy=merge + // +optional + AdmissionPlugins []AdmissionPlugin `json:"admissionPlugins,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=admissionPlugins"` + // APIAudiences are the identifiers of the API. The service account token authenticator will + // validate that tokens used against the API are bound to at least one of these audiences. + // Defaults to ["kubernetes"]. + // +optional + APIAudiences []string `json:"apiAudiences,omitempty" protobuf:"bytes,3,rep,name=apiAudiences"` + // AuditConfig contains configuration settings for the audit of the kube-apiserver. + // +optional + AuditConfig *AuditConfig `json:"auditConfig,omitempty" protobuf:"bytes,4,opt,name=auditConfig"` + // EnableBasicAuthentication defines whether basic authentication should be enabled for this cluster or not. + // +optional + EnableBasicAuthentication *bool `json:"enableBasicAuthentication,omitempty" protobuf:"varint,5,opt,name=enableBasicAuthentication"` + // OIDCConfig contains configuration settings for the OIDC provider. + // +optional + OIDCConfig *OIDCConfig `json:"oidcConfig,omitempty" protobuf:"bytes,6,opt,name=oidcConfig"` + // RuntimeConfig contains information about enabled or disabled APIs. + // +optional + RuntimeConfig map[string]bool `json:"runtimeConfig,omitempty" protobuf:"bytes,7,rep,name=runtimeConfig"` + // ServiceAccountConfig contains configuration settings for the service account handling + // of the kube-apiserver. + // +optional + ServiceAccountConfig *ServiceAccountConfig `json:"serviceAccountConfig,omitempty" protobuf:"bytes,8,opt,name=serviceAccountConfig"` + // WatchCacheSizes contains configuration of the API server's watch cache sizes. + // Configuring these flags might be useful for large-scale Shoot clusters with a lot of parallel update requests + // and a lot of watching controllers (e.g. large shooted Seed clusters). When the API server's watch cache's + // capacity is too small to cope with the amount of update requests and watchers for a particular resource, it + // might happen that controller watches are permanently stopped with `too old resource version` errors. + // Starting from kubernetes v1.19, the API server's watch cache size is adapted dynamically and setting the watch + // cache size flags will have no effect, except when setting it to 0 (which disables the watch cache). + // +optional + WatchCacheSizes *WatchCacheSizes `json:"watchCacheSizes,omitempty" protobuf:"bytes,9,opt,name=watchCacheSizes"` + // Requests contains configuration for request-specific settings for the kube-apiserver. + // +optional + Requests *KubeAPIServerRequests `json:"requests,omitempty" protobuf:"bytes,10,opt,name=requests"` +} + +// KubeAPIServerRequests contains configuration for request-specific settings for the kube-apiserver. +type KubeAPIServerRequests struct { + // MaxNonMutatingInflight is the maximum number of non-mutating requests in flight at a given time. When the server + // exceeds this, it rejects requests. 
+ // +optional + MaxNonMutatingInflight *int32 `json:"maxNonMutatingInflight,omitempty" protobuf:"bytes,1,name=maxNonMutatingInflight"` + // MaxMutatingInflight is the maximum number of mutating requests in flight at a given time. When the server + // exceeds this, it rejects requests. + // +optional + MaxMutatingInflight *int32 `json:"maxMutatingInflight,omitempty" protobuf:"bytes,2,name=maxMutatingInflight"` +} + +// ServiceAccountConfig is the kube-apiserver configuration for service accounts. +type ServiceAccountConfig struct { + // Issuer is the identifier of the service account token issuer. The issuer will assert this + // identifier in "iss" claim of issued tokens. This value is a string or URI. + // Defaults to URI of the API server. + // +optional + Issuer *string `json:"issuer,omitempty" protobuf:"bytes,1,opt,name=issuer"` + // SigningKeySecret is a reference to a secret that contains an optional private key of the + // service account token issuer. The issuer will sign issued ID tokens with this private key. + // Only useful if service account tokens are also issued by another external system. + // +optional + SigningKeySecret *corev1.LocalObjectReference `json:"signingKeySecretName,omitempty" protobuf:"bytes,2,opt,name=signingKeySecretName"` +} + +// AuditConfig contains settings for audit of the api server +type AuditConfig struct { + // AuditPolicy contains configuration settings for audit policy of the kube-apiserver. + // +optional + AuditPolicy *AuditPolicy `json:"auditPolicy,omitempty" protobuf:"bytes,1,opt,name=auditPolicy"` +} + +// AuditPolicy contains audit policy for kube-apiserver +type AuditPolicy struct { + // ConfigMapRef is a reference to a ConfigMap object in the same namespace, + // which contains the audit policy for the kube-apiserver. + // +optional + ConfigMapRef *corev1.ObjectReference `json:"configMapRef,omitempty" protobuf:"bytes,1,opt,name=configMapRef"` +} + +// OIDCConfig contains configuration settings for the OIDC provider. +// Note: Descriptions were taken from the Kubernetes documentation. +type OIDCConfig struct { + // If set, the OpenID server's certificate will be verified by one of the authorities in the oidc-ca-file, otherwise the host's root CA set will be used. + // +optional + CABundle *string `json:"caBundle,omitempty" protobuf:"bytes,1,opt,name=caBundle"` + // ClientAuthentication can optionally contain client configuration used for kubeconfig generation. + // +optional + ClientAuthentication *OpenIDConnectClientAuthentication `json:"clientAuthentication,omitempty" protobuf:"bytes,2,opt,name=clientAuthentication"` + // The client ID for the OpenID Connect client, must be set if oidc-issuer-url is set. + // +optional + ClientID *string `json:"clientID,omitempty" protobuf:"bytes,3,opt,name=clientID"` + // If provided, the name of a custom OpenID Connect claim for specifying user groups. The claim value is expected to be a string or array of strings. This flag is experimental, please see the authentication documentation for further details. + // +optional + GroupsClaim *string `json:"groupsClaim,omitempty" protobuf:"bytes,4,opt,name=groupsClaim"` + // If provided, all groups will be prefixed with this value to prevent conflicts with other authentication strategies. + // +optional + GroupsPrefix *string `json:"groupsPrefix,omitempty" protobuf:"bytes,5,opt,name=groupsPrefix"` + // The URL of the OpenID issuer, only HTTPS scheme will be accepted. If set, it will be used to verify the OIDC JSON Web Token (JWT). 
+ // +optional + IssuerURL *string `json:"issuerURL,omitempty" protobuf:"bytes,6,opt,name=issuerURL"` + // ATTENTION: Only meaningful for Kubernetes >= 1.11 + // key=value pairs that describes a required claim in the ID Token. If set, the claim is verified to be present in the ID Token with a matching value. + // +optional + RequiredClaims map[string]string `json:"requiredClaims,omitempty" protobuf:"bytes,7,rep,name=requiredClaims"` + // List of allowed JOSE asymmetric signing algorithms. JWTs with a 'alg' header value not in this list will be rejected. Values are defined by RFC 7518 https://tools.ietf.org/html/rfc7518#section-3.1 + // +optional + SigningAlgs []string `json:"signingAlgs,omitempty" protobuf:"bytes,8,rep,name=signingAlgs"` + // The OpenID claim to use as the user name. Note that claims other than the default ('sub') is not guaranteed to be unique and immutable. This flag is experimental, please see the authentication documentation for further details. (default "sub") + // +optional + UsernameClaim *string `json:"usernameClaim,omitempty" protobuf:"bytes,9,opt,name=usernameClaim"` + // If provided, all usernames will be prefixed with this value. If not provided, username claims other than 'email' are prefixed by the issuer URL to avoid clashes. To skip any prefixing, provide the value '-'. + // +optional + UsernamePrefix *string `json:"usernamePrefix,omitempty" protobuf:"bytes,10,opt,name=usernamePrefix"` +} + +// OpenIDConnectClientAuthentication contains configuration for OIDC clients. +type OpenIDConnectClientAuthentication struct { + // Extra configuration added to kubeconfig's auth-provider. + // Must not be any of idp-issuer-url, client-id, client-secret, idp-certificate-authority, idp-certificate-authority-data, id-token or refresh-token + // +optional + ExtraConfig map[string]string `json:"extraConfig,omitempty" protobuf:"bytes,1,rep,name=extraConfig"` + // The client Secret for the OpenID Connect client. + // +optional + Secret *string `json:"secret,omitempty" protobuf:"bytes,2,opt,name=secret"` +} + +// AdmissionPlugin contains information about a specific admission plugin and its corresponding configuration. +type AdmissionPlugin struct { + // Name is the name of the plugin. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // Config is the configuration of the plugin. + // +optional + Config *runtime.RawExtension `json:"config,omitempty" protobuf:"bytes,2,opt,name=config"` +} + +// WatchCacheSizes contains configuration of the API server's watch cache sizes. +type WatchCacheSizes struct { + // Default configures the default watch cache size of the kube-apiserver + // (flag `--default-watch-cache-size`, defaults to 100). + // See: https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/ + // +optional + Default *int32 `json:"default,omitempty" protobuf:"varint,1,opt,name=default"` + // Resources configures the watch cache size of the kube-apiserver per resource + // (flag `--watch-cache-sizes`). + // See: https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/ + // +optional + Resources []ResourceWatchCacheSize `json:"resources,omitempty" protobuf:"bytes,2,rep,name=resources"` +} + +// ResourceWatchCacheSize contains configuration of the API server's watch cache size for one specific resource. +type ResourceWatchCacheSize struct { + // APIGroup is the API group of the resource for which the watch cache size should be configured. + // An unset value is used to specify the legacy core API (e.g. 
for `secrets`). + // +optional + APIGroup *string `json:"apiGroup,omitempty" protobuf:"bytes,1,opt,name=apiGroup"` + // Resource is the name of the resource for which the watch cache size should be configured + // (in lowercase plural form, e.g. `secrets`). + Resource string `json:"resource" protobuf:"bytes,2,opt,name=resource"` + // CacheSize specifies the watch cache size that should be configured for the specified resource. + CacheSize int32 `json:"size" protobuf:"varint,3,opt,name=size"` +} + +// KubeControllerManagerConfig contains configuration settings for the kube-controller-manager. +type KubeControllerManagerConfig struct { + KubernetesConfig `json:",inline" protobuf:"bytes,1,opt,name=kubernetesConfig"` + // HorizontalPodAutoscalerConfig contains horizontal pod autoscaler configuration settings for the kube-controller-manager. + // +optional + HorizontalPodAutoscalerConfig *HorizontalPodAutoscalerConfig `json:"horizontalPodAutoscaler,omitempty" protobuf:"bytes,2,opt,name=horizontalPodAutoscaler"` + // NodeCIDRMaskSize defines the mask size for node cidr in cluster (default is 24) + // +optional + NodeCIDRMaskSize *int32 `json:"nodeCIDRMaskSize,omitempty" protobuf:"varint,3,opt,name=nodeCIDRMaskSize"` + // PodEvictionTimeout defines the grace period for deleting pods on failed nodes. Defaults to 2m. + // +optional + PodEvictionTimeout *metav1.Duration `json:"podEvictionTimeout,omitempty" protobuf:"bytes,4,opt,name=podEvictionTimeout"` +} + +// HorizontalPodAutoscalerConfig contains horizontal pod autoscaler configuration settings for the kube-controller-manager. +// Note: Descriptions were taken from the Kubernetes documentation. +type HorizontalPodAutoscalerConfig struct { + // The period after which a ready pod transition is considered to be the first. + // +optional + CPUInitializationPeriod *metav1.Duration `json:"cpuInitializationPeriod,omitempty" protobuf:"bytes,1,opt,name=cpuInitializationPeriod"` + // The period since last downscale, before another downscale can be performed in horizontal pod autoscaler. + // +optional + DownscaleDelay *metav1.Duration `json:"downscaleDelay,omitempty" protobuf:"bytes,2,opt,name=downscaleDelay"` + // The configurable window at which the controller will choose the highest recommendation for autoscaling. + // +optional + DownscaleStabilization *metav1.Duration `json:"downscaleStabilization,omitempty" protobuf:"bytes,3,opt,name=downscaleStabilization"` + // The configurable period at which the horizontal pod autoscaler considers a Pod “not yet ready” given that it’s unready and it has transitioned to unready during that time. + // +optional + InitialReadinessDelay *metav1.Duration `json:"initialReadinessDelay,omitempty" protobuf:"bytes,4,opt,name=initialReadinessDelay"` + // The period for syncing the number of pods in horizontal pod autoscaler. + // +optional + SyncPeriod *metav1.Duration `json:"syncPeriod,omitempty" protobuf:"bytes,5,opt,name=syncPeriod"` + // The minimum change (from 1.0) in the desired-to-actual metrics ratio for the horizontal pod autoscaler to consider scaling. + // +optional + Tolerance *float64 `json:"tolerance,omitempty" protobuf:"fixed64,6,opt,name=tolerance"` + // The period since last upscale, before another upscale can be performed in horizontal pod autoscaler. + // +optional + UpscaleDelay *metav1.Duration `json:"upscaleDelay,omitempty" protobuf:"bytes,7,opt,name=upscaleDelay"` +} + +const ( + // DefaultHPADownscaleDelay is a constant for the default HPA downscale delay for a Shoot cluster. 
+ DefaultHPADownscaleDelay = 15 * time.Minute
+ // DefaultHPASyncPeriod is a constant for the default HPA sync period for a Shoot cluster.
+ DefaultHPASyncPeriod = 30 * time.Second
+ // DefaultHPATolerance is a constant for the default HPA tolerance for a Shoot cluster.
+ DefaultHPATolerance = 0.1
+ // DefaultHPAUpscaleDelay is a constant for the default HPA upscale delay for a Shoot cluster.
+ DefaultHPAUpscaleDelay = 1 * time.Minute
+ // DefaultDownscaleStabilization is a constant for the default HPA downscale stabilization window for a Shoot cluster.
+ DefaultDownscaleStabilization = 5 * time.Minute
+ // DefaultInitialReadinessDelay is a constant for the default HPA ReadinessDelay value for a Shoot cluster.
+ DefaultInitialReadinessDelay = 30 * time.Second
+ // DefaultCPUInitializationPeriod is a constant for the default value of the CPUInitializationPeriod for a Shoot cluster.
+ DefaultCPUInitializationPeriod = 5 * time.Minute
+)
+
+// KubeSchedulerConfig contains configuration settings for the kube-scheduler.
+type KubeSchedulerConfig struct {
+ KubernetesConfig `json:",inline" protobuf:"bytes,1,opt,name=kubernetesConfig"`
+ // KubeMaxPDVols allows configuring the `KUBE_MAX_PD_VOLS` environment variable for the kube-scheduler.
+ // Please find more information here: https://kubernetes.io/docs/concepts/storage/storage-limits/#custom-limits
+ // Note that using this field is considered alpha-/experimental-level and is at your own risk. You should be aware
+ // of all the side-effects and consequences when changing it.
+ // +optional
+ KubeMaxPDVols *string `json:"kubeMaxPDVols,omitempty" protobuf:"bytes,2,opt,name=kubeMaxPDVols"`
+}
+
+// KubeProxyConfig contains configuration settings for the kube-proxy.
+type KubeProxyConfig struct {
+ KubernetesConfig `json:",inline" protobuf:"bytes,1,opt,name=kubernetesConfig"`
+ // Mode specifies which proxy mode to use.
+ // Defaults to IPTables.
+ // +optional
+ Mode *ProxyMode `json:"mode,omitempty" protobuf:"bytes,2,opt,name=mode,casttype=ProxyMode"`
+}
+
+// ProxyMode is the proxy mode available on the Linux platform: 'userspace' (older, going to be EOL), 'iptables'
+// (newer, faster), 'ipvs' (newest, better in performance and scalability).
+// As of now, only 'iptables' and 'ipvs' are supported by Gardener.
+// On Linux, if the iptables proxy is selected (regardless of how) but the system's kernel or iptables versions are
+// insufficient, this always falls back to the userspace proxy. IPVS mode will be enabled when the proxy mode is set to 'ipvs',
+// and the fallback path is first iptables and then userspace.
+type ProxyMode string
+
+const (
+ // ProxyModeIPTables uses iptables as the proxy implementation.
+ ProxyModeIPTables ProxyMode = "IPTables"
+ // ProxyModeIPVS uses ipvs as the proxy implementation.
+ ProxyModeIPVS ProxyMode = "IPVS"
+)
+
+// KubeletConfig contains configuration settings for the kubelet.
+type KubeletConfig struct {
+ KubernetesConfig `json:",inline" protobuf:"bytes,1,opt,name=kubernetesConfig"`
+ // CPUCFSQuota allows you to disable/enable CPU throttling for Pods.
+ // +optional
+ CPUCFSQuota *bool `json:"cpuCFSQuota,omitempty" protobuf:"varint,2,opt,name=cpuCFSQuota"`
+ // CPUManagerPolicy allows setting alternative CPU management policies (default: none).
+ // +optional
+ CPUManagerPolicy *string `json:"cpuManagerPolicy,omitempty" protobuf:"bytes,3,opt,name=cpuManagerPolicy"`
+ // EvictionHard describes a set of eviction thresholds (e.g. memory.available<1Gi) that if met would trigger a Pod eviction.
+ // +optional + // Default: + // memory.available: "100Mi/1Gi/5%" + // nodefs.available: "5%" + // nodefs.inodesFree: "5%" + // imagefs.available: "5%" + // imagefs.inodesFree: "5%" + EvictionHard *KubeletConfigEviction `json:"evictionHard,omitempty" protobuf:"bytes,4,opt,name=evictionHard"` + // EvictionMaxPodGracePeriod describes the maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met. + // +optional + // Default: 90 + EvictionMaxPodGracePeriod *int32 `json:"evictionMaxPodGracePeriod,omitempty" protobuf:"varint,5,opt,name=evictionMaxPodGracePeriod"` + // EvictionMinimumReclaim configures the amount of resources below the configured eviction threshold that the kubelet attempts to reclaim whenever the kubelet observes resource pressure. + // +optional + // Default: 0 for each resource + EvictionMinimumReclaim *KubeletConfigEvictionMinimumReclaim `json:"evictionMinimumReclaim,omitempty" protobuf:"bytes,6,opt,name=evictionMinimumReclaim"` + // EvictionPressureTransitionPeriod is the duration for which the kubelet has to wait before transitioning out of an eviction pressure condition. + // +optional + // Default: 4m0s + EvictionPressureTransitionPeriod *metav1.Duration `json:"evictionPressureTransitionPeriod,omitempty" protobuf:"bytes,7,opt,name=evictionPressureTransitionPeriod"` + // EvictionSoft describes a set of eviction thresholds (e.g. memory.available<1.5Gi) that if met over a corresponding grace period would trigger a Pod eviction. + // +optional + // Default: + // memory.available: "200Mi/1.5Gi/10%" + // nodefs.available: "10%" + // nodefs.inodesFree: "10%" + // imagefs.available: "10%" + // imagefs.inodesFree: "10%" + EvictionSoft *KubeletConfigEviction `json:"evictionSoft,omitempty" protobuf:"bytes,8,opt,name=evictionSoft"` + // EvictionSoftGracePeriod describes a set of eviction grace periods (e.g. memory.available=1m30s) that correspond to how long a soft eviction threshold must hold before triggering a Pod eviction. + // +optional + // Default: + // memory.available: 1m30s + // nodefs.available: 1m30s + // nodefs.inodesFree: 1m30s + // imagefs.available: 1m30s + // imagefs.inodesFree: 1m30s + EvictionSoftGracePeriod *KubeletConfigEvictionSoftGracePeriod `json:"evictionSoftGracePeriod,omitempty" protobuf:"bytes,9,opt,name=evictionSoftGracePeriod"` + // MaxPods is the maximum number of Pods that are allowed by the Kubelet. + // +optional + // Default: 110 + MaxPods *int32 `json:"maxPods,omitempty" protobuf:"varint,10,opt,name=maxPods"` + // PodPIDsLimit is the maximum number of process IDs per pod allowed by the kubelet. + // +optional + PodPIDsLimit *int64 `json:"podPidsLimit,omitempty" protobuf:"varint,11,opt,name=podPidsLimit"` + // ImagePullProgressDeadline describes the time limit under which if no pulling progress is made, the image pulling will be cancelled. + // +optional + // Default: 1m + ImagePullProgressDeadline *metav1.Duration `json:"imagePullProgressDeadline,omitempty" protobuf:"bytes,12,opt,name=imagePullProgressDeadline"` + // FailSwapOn makes the Kubelet fail to start if swap is enabled on the node. (default true). + // +optional + FailSwapOn *bool `json:"failSwapOn,omitempty" protobuf:"varint,13,opt,name=failSwapOn"` + // KubeReserved is the configuration for resources reserved for kubernetes node components (mainly kubelet and container runtime). + // When updating these values, be aware that cgroup resizes may not succeed on active worker nodes. 
Look for the NodeAllocatableEnforced event to determine if the configuration was applied. + // +optional + // Default: cpu=80m,memory=1Gi,pid=20k + KubeReserved *KubeletConfigReserved `json:"kubeReserved,omitempty" protobuf:"bytes,14,opt,name=kubeReserved"` + // SystemReserved is the configuration for resources reserved for system processes not managed by kubernetes (e.g. journald). + // When updating these values, be aware that cgroup resizes may not succeed on active worker nodes. Look for the NodeAllocatableEnforced event to determine if the configuration was applied. + // +optional + SystemReserved *KubeletConfigReserved `json:"systemReserved,omitempty" protobuf:"bytes,15,opt,name=systemReserved"` +} + +// KubeletConfigEviction contains kubelet eviction thresholds supporting either a resource.Quantity or a percentage based value. +type KubeletConfigEviction struct { + // MemoryAvailable is the threshold for the free memory on the host server. + // +optional + MemoryAvailable *string `json:"memoryAvailable,omitempty" protobuf:"bytes,1,opt,name=memoryAvailable"` + // ImageFSAvailable is the threshold for the free disk space in the imagefs filesystem (docker images and container writable layers). + // +optional + ImageFSAvailable *string `json:"imageFSAvailable,omitempty" protobuf:"bytes,2,opt,name=imageFSAvailable"` + // ImageFSInodesFree is the threshold for the available inodes in the imagefs filesystem. + // +optional + ImageFSInodesFree *string `json:"imageFSInodesFree,omitempty" protobuf:"bytes,3,opt,name=imageFSInodesFree"` + // NodeFSAvailable is the threshold for the free disk space in the nodefs filesystem (docker volumes, logs, etc). + // +optional + NodeFSAvailable *string `json:"nodeFSAvailable,omitempty" protobuf:"bytes,4,opt,name=nodeFSAvailable"` + // NodeFSInodesFree is the threshold for the available inodes in the nodefs filesystem. + // +optional + NodeFSInodesFree *string `json:"nodeFSInodesFree,omitempty" protobuf:"bytes,5,opt,name=nodeFSInodesFree"` +} + +// KubeletConfigEvictionMinimumReclaim contains configuration for the kubelet eviction minimum reclaim. +type KubeletConfigEvictionMinimumReclaim struct { + // MemoryAvailable is the threshold for the memory reclaim on the host server. + // +optional + MemoryAvailable *resource.Quantity `json:"memoryAvailable,omitempty" protobuf:"bytes,1,opt,name=memoryAvailable"` + // ImageFSAvailable is the threshold for the disk space reclaim in the imagefs filesystem (docker images and container writable layers). + // +optional + ImageFSAvailable *resource.Quantity `json:"imageFSAvailable,omitempty" protobuf:"bytes,2,opt,name=imageFSAvailable"` + // ImageFSInodesFree is the threshold for the inodes reclaim in the imagefs filesystem. + // +optional + ImageFSInodesFree *resource.Quantity `json:"imageFSInodesFree,omitempty" protobuf:"bytes,3,opt,name=imageFSInodesFree"` + // NodeFSAvailable is the threshold for the disk space reclaim in the nodefs filesystem (docker volumes, logs, etc). + // +optional + NodeFSAvailable *resource.Quantity `json:"nodeFSAvailable,omitempty" protobuf:"bytes,4,opt,name=nodeFSAvailable"` + // NodeFSInodesFree is the threshold for the inodes reclaim in the nodefs filesystem. + // +optional + NodeFSInodesFree *resource.Quantity `json:"nodeFSInodesFree,omitempty" protobuf:"bytes,5,opt,name=nodeFSInodesFree"` +} + +// KubeletConfigEvictionSoftGracePeriod contains grace periods for kubelet eviction thresholds. 
+type KubeletConfigEvictionSoftGracePeriod struct { + // MemoryAvailable is the grace period for the MemoryAvailable eviction threshold. + // +optional + MemoryAvailable *metav1.Duration `json:"memoryAvailable,omitempty" protobuf:"bytes,1,opt,name=memoryAvailable"` + // ImageFSAvailable is the grace period for the ImageFSAvailable eviction threshold. + // +optional + ImageFSAvailable *metav1.Duration `json:"imageFSAvailable,omitempty" protobuf:"bytes,2,opt,name=imageFSAvailable"` + // ImageFSInodesFree is the grace period for the ImageFSInodesFree eviction threshold. + // +optional + ImageFSInodesFree *metav1.Duration `json:"imageFSInodesFree,omitempty" protobuf:"bytes,3,opt,name=imageFSInodesFree"` + // NodeFSAvailable is the grace period for the NodeFSAvailable eviction threshold. + // +optional + NodeFSAvailable *metav1.Duration `json:"nodeFSAvailable,omitempty" protobuf:"bytes,4,opt,name=nodeFSAvailable"` + // NodeFSInodesFree is the grace period for the NodeFSInodesFree eviction threshold. + // +optional + NodeFSInodesFree *metav1.Duration `json:"nodeFSInodesFree,omitempty" protobuf:"bytes,5,opt,name=nodeFSInodesFree"` +} + +// KubeletConfigReserved contains reserved resources for daemons +type KubeletConfigReserved struct { + // CPU is the reserved cpu. + // +optional + CPU *resource.Quantity `json:"cpu,omitempty" protobuf:"bytes,1,opt,name=cpu"` + // Memory is the reserved memory. + // +optional + Memory *resource.Quantity `json:"memory,omitempty" protobuf:"bytes,2,opt,name=memory"` + // EphemeralStorage is the reserved ephemeral-storage. + // +optional + EphemeralStorage *resource.Quantity `json:"ephemeralStorage,omitempty" protobuf:"bytes,3,opt,name=ephemeralStorage"` + // PID is the reserved process-ids. + // To reserve PID, the SupportNodePidsLimit feature gate must be enabled in Kubernetes versions < 1.15. + // +optional + PID *resource.Quantity `json:"pid,omitempty" protobuf:"bytes,4,opt,name=pid"` +} + +////////////////////////////////////////////////////////////////////////////////////////////////// +// Networking relevant types // +////////////////////////////////////////////////////////////////////////////////////////////////// + +// Networking defines networking parameters for the shoot cluster. +type Networking struct { + // Type identifies the type of the networking plugin. + Type string `json:"type" protobuf:"bytes,1,opt,name=type"` + // ProviderConfig is the configuration passed to network resource. + // +optional + ProviderConfig *runtime.RawExtension `json:"providerConfig,omitempty" protobuf:"bytes,2,opt,name=providerConfig"` + // Pods is the CIDR of the pod network. + // +optional + Pods *string `json:"pods,omitempty" protobuf:"bytes,3,opt,name=pods"` + // Nodes is the CIDR of the entire node network. + // +optional + Nodes *string `json:"nodes,omitempty" protobuf:"bytes,4,opt,name=nodes"` + // Services is the CIDR of the service network. + // +optional + Services *string `json:"services,omitempty" protobuf:"bytes,5,opt,name=services"` +} + +const ( + // DefaultPodNetworkCIDR is a constant for the default pod network CIDR of a Shoot cluster. + DefaultPodNetworkCIDR = "100.96.0.0/11" + // DefaultServiceNetworkCIDR is a constant for the default service network CIDR of a Shoot cluster. 
+ DefaultServiceNetworkCIDR = "100.64.0.0/13" +) + +////////////////////////////////////////////////////////////////////////////////////////////////// +// Maintenance relevant types // +////////////////////////////////////////////////////////////////////////////////////////////////// + +const ( + // MaintenanceTimeWindowDurationMinimum is the minimum duration for a maintenance time window. + MaintenanceTimeWindowDurationMinimum = 30 * time.Minute + // MaintenanceTimeWindowDurationMaximum is the maximum duration for a maintenance time window. + MaintenanceTimeWindowDurationMaximum = 6 * time.Hour +) + +// Maintenance contains information about the time window for maintenance operations and which +// operations should be performed. +type Maintenance struct { + // AutoUpdate contains information about which constraints should be automatically updated. + // +optional + AutoUpdate *MaintenanceAutoUpdate `json:"autoUpdate,omitempty" protobuf:"bytes,1,opt,name=autoUpdate"` + // TimeWindow contains information about the time window for maintenance operations. + // +optional + TimeWindow *MaintenanceTimeWindow `json:"timeWindow,omitempty" protobuf:"bytes,2,opt,name=timeWindow"` + // ConfineSpecUpdateRollout prevents that changes/updates to the shoot specification will be rolled out immediately. + // Instead, they are rolled out during the shoot's maintenance time window. There is one exception that will trigger + // an immediate roll out which is changes to the Spec.Hibernation.Enabled field. + // +optional + ConfineSpecUpdateRollout *bool `json:"confineSpecUpdateRollout,omitempty" protobuf:"varint,3,opt,name=confineSpecUpdateRollout"` +} + +// MaintenanceAutoUpdate contains information about which constraints should be automatically updated. +type MaintenanceAutoUpdate struct { + // KubernetesVersion indicates whether the patch Kubernetes version may be automatically updated (default: true). + KubernetesVersion bool `json:"kubernetesVersion" protobuf:"varint,1,opt,name=kubernetesVersion"` + // MachineImageVersion indicates whether the machine image version may be automatically updated (default: true). + MachineImageVersion bool `json:"machineImageVersion" protobuf:"varint,2,opt,name=machineImageVersion"` +} + +// MaintenanceTimeWindow contains information about the time window for maintenance operations. +type MaintenanceTimeWindow struct { + // Begin is the beginning of the time window in the format HHMMSS+ZONE, e.g. "220000+0100". + // If not present, a random value will be computed. + Begin string `json:"begin" protobuf:"bytes,1,opt,name=begin"` + // End is the end of the time window in the format HHMMSS+ZONE, e.g. "220000+0100". + // If not present, the value will be computed based on the "Begin" value. + End string `json:"end" protobuf:"bytes,2,opt,name=end"` +} + +////////////////////////////////////////////////////////////////////////////////////////////////// +// Monitoring relevant types // +////////////////////////////////////////////////////////////////////////////////////////////////// + +// Monitoring contains information about the monitoring configuration for the shoot. +type Monitoring struct { + // Alerting contains information about the alerting configuration for the shoot cluster. + // +optional + Alerting *Alerting `json:"alerting,omitempty" protobuf:"bytes,1,opt,name=alerting"` +} + +// Alerting contains information about how alerting will be done (i.e. who will receive alerts and how). 
+type Alerting struct {
+ // EmailReceivers is a list of recipients for alerts.
+ // +optional
+ EmailReceivers []string `json:"emailReceivers,omitempty" protobuf:"bytes,1,rep,name=emailReceivers"`
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+// Provider relevant types //
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Provider contains provider-specific information that is handed over to the provider-specific
+// extension controller.
+type Provider struct {
+ // Type is the type of the provider.
+ Type string `json:"type" protobuf:"bytes,1,opt,name=type"`
+ // ControlPlaneConfig contains the provider-specific control plane config blob. Please look up the concrete
+ // definition in the documentation of your provider extension.
+ // +optional
+ ControlPlaneConfig *runtime.RawExtension `json:"controlPlaneConfig,omitempty" protobuf:"bytes,2,opt,name=controlPlaneConfig"`
+ // InfrastructureConfig contains the provider-specific infrastructure config blob. Please look up the concrete
+ // definition in the documentation of your provider extension.
+ // +optional
+ InfrastructureConfig *runtime.RawExtension `json:"infrastructureConfig,omitempty" protobuf:"bytes,3,opt,name=infrastructureConfig"`
+ // Workers is a list of worker groups.
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ Workers []Worker `json:"workers" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,4,rep,name=workers"`
+}
+
+// Worker is the base definition of a worker group.
+type Worker struct {
+ // Annotations is a map of key/value pairs for annotations for all the `Node` objects in this worker pool.
+ // +optional
+ Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,1,rep,name=annotations"`
+ // CABundle is a certificate bundle which will be installed onto every machine of this worker pool.
+ // +optional
+ CABundle *string `json:"caBundle,omitempty" protobuf:"bytes,2,opt,name=caBundle"`
+ // CRI contains configuration of the CRI support for every machine in the worker pool.
+ // +optional
+ CRI *CRI `json:"cri,omitempty" protobuf:"bytes,3,opt,name=cri"`
+ // Kubernetes contains configuration for Kubernetes components related to this worker pool.
+ // +optional
+ Kubernetes *WorkerKubernetes `json:"kubernetes,omitempty" protobuf:"bytes,4,opt,name=kubernetes"`
+ // Labels is a map of key/value pairs for labels for all the `Node` objects in this worker pool.
+ // +optional
+ Labels map[string]string `json:"labels,omitempty" protobuf:"bytes,5,rep,name=labels"`
+ // Name is the name of the worker group.
+ Name string `json:"name" protobuf:"bytes,6,opt,name=name"`
+ // Machine contains information about the machine type and image.
+ Machine Machine `json:"machine" protobuf:"bytes,7,opt,name=machine"`
+ // Maximum is the maximum number of VMs to create.
+ Maximum int32 `json:"maximum" protobuf:"varint,8,opt,name=maximum"`
+ // Minimum is the minimum number of VMs to create.
+ Minimum int32 `json:"minimum" protobuf:"varint,9,opt,name=minimum"`
+ // MaxSurge is the maximum number of VMs that are created during an update.
+ // +optional
+ MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,10,opt,name=maxSurge"`
+ // MaxUnavailable is the maximum number of VMs that can be unavailable during an update.
+ // +optional
+ MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,11,opt,name=maxUnavailable"`
+ // ProviderConfig is the provider-specific configuration for this worker pool.
+ // +optional
+ ProviderConfig *runtime.RawExtension `json:"providerConfig,omitempty" protobuf:"bytes,12,opt,name=providerConfig"`
+ // Taints is a list of taints for all the `Node` objects in this worker pool.
+ // +optional
+ Taints []corev1.Taint `json:"taints,omitempty" protobuf:"bytes,13,rep,name=taints"`
+ // Volume contains information about the volume type and size.
+ // +optional
+ Volume *Volume `json:"volume,omitempty" protobuf:"bytes,14,opt,name=volume"`
+ // DataVolumes contains a list of additional worker volumes.
+ // +optional
+ DataVolumes []DataVolume `json:"dataVolumes,omitempty" protobuf:"bytes,15,rep,name=dataVolumes"`
+ // KubeletDataVolumeName contains the name of a dataVolume that should be used for storing kubelet state.
+ // +optional
+ KubeletDataVolumeName *string `json:"kubeletDataVolumeName,omitempty" protobuf:"bytes,16,opt,name=kubeletDataVolumeName"`
+ // Zones is a list of availability zones that are used to evenly distribute this worker pool. Optional
+ // as not every provider may support availability zones.
+ // +optional
+ Zones []string `json:"zones,omitempty" protobuf:"bytes,17,rep,name=zones"`
+ // SystemComponents contains configuration for system components related to this worker pool.
+ // +optional
+ SystemComponents *WorkerSystemComponents `json:"systemComponents,omitempty" protobuf:"bytes,18,opt,name=systemComponents"`
+ // MachineControllerManagerSettings contains configurations for different worker pools, e.g. MachineDrainTimeout, MachineHealthTimeout.
+ // +optional
+ MachineControllerManagerSettings *MachineControllerManagerSettings `json:"machineControllerManager,omitempty" protobuf:"bytes,19,opt,name=machineControllerManager"`
+}
+
+// MachineControllerManagerSettings contains configurations for different worker pools, e.g. MachineDrainTimeout, MachineHealthTimeout.
+type MachineControllerManagerSettings struct {
+ // MachineDrainTimeout is the period after which a machine is forcefully deleted.
+ // +optional
+ MachineDrainTimeout *metav1.Duration `json:"machineDrainTimeout,omitempty" protobuf:"bytes,1,name=machineDrainTimeout"`
+ // MachineHealthTimeout is the period after which a machine is declared failed.
+ // +optional
+ MachineHealthTimeout *metav1.Duration `json:"machineHealthTimeout,omitempty" protobuf:"bytes,2,name=machineHealthTimeout"`
+ // MachineCreationTimeout is the period after which the creation of a machine is declared failed.
+ // +optional
+ MachineCreationTimeout *metav1.Duration `json:"machineCreationTimeout,omitempty" protobuf:"bytes,3,name=machineCreationTimeout"`
+ // MaxEvictRetries is the number of eviction retries on a pod after which drain is declared failed, and forceful deletion is triggered.
+ // +optional
+ MaxEvictRetries *int32 `json:"maxEvictRetries,omitempty" protobuf:"bytes,4,name=maxEvictRetries"`
+ // NodeConditions is the set of conditions: if any of them is true for the period of MachineHealthTimeout, the machine will be declared failed.
+ // +optional + NodeConditions []string `json:"nodeConditions,omitempty" protobuf:"bytes,5,name=nodeConditions"` +} + +// WorkerSystemComponents contains configuration for system components related to this worker pool +type WorkerSystemComponents struct { + // Allow determines whether the pool should be allowed to host system components or not (defaults to true) + Allow bool `json:"allow" protobuf:"bytes,1,name=allow"` +} + +// WorkerKubernetes contains configuration for Kubernetes components related to this worker pool. +type WorkerKubernetes struct { + // Kubelet contains configuration settings for all kubelets of this worker pool. + // +optional + Kubelet *KubeletConfig `json:"kubelet,omitempty" protobuf:"bytes,1,opt,name=kubelet"` +} + +// Machine contains information about the machine type and image. +type Machine struct { + // Type is the machine type of the worker group. + Type string `json:"type" protobuf:"bytes,1,opt,name=type"` + // Image holds information about the machine image to use for all nodes of this pool. It will default to the + // latest version of the first image stated in the referenced CloudProfile if no value has been provided. + // +optional + Image *ShootMachineImage `json:"image,omitempty" protobuf:"bytes,2,opt,name=image"` +} + +// ShootMachineImage defines the name and the version of the shoot's machine image in any environment. Has to be +// defined in the respective CloudProfile. +type ShootMachineImage struct { + // Name is the name of the image. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // ProviderConfig is the shoot's individual configuration passed to an extension resource. + // +optional + ProviderConfig *runtime.RawExtension `json:"providerConfig,omitempty" protobuf:"bytes,2,opt,name=providerConfig"` + // Version is the version of the shoot's image. + // If version is not provided, it will be defaulted to the latest version from the CloudProfile. + // +optional + Version *string `json:"version,omitempty" protobuf:"bytes,3,opt,name=version"` +} + +// Volume contains information about the volume type and size. +type Volume struct { + // Name of the volume to make it referencable. + // +optional + Name *string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` + // Type is the type of the volume. + // +optional + Type *string `json:"type,omitempty" protobuf:"bytes,2,opt,name=type"` + // Size is the size of the volume. + VolumeSize string `json:"size" protobuf:"bytes,3,opt,name=size"` + // Encrypted determines if the volume should be encrypted. + // +optional + Encrypted *bool `json:"encrypted,omitempty" protobuf:"varint,4,opt,name=primary"` +} + +// DataVolume contains information about a data volume. +type DataVolume struct { + // Name of the volume to make it referencable. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // Type is the type of the volume. + // +optional + Type *string `json:"type,omitempty" protobuf:"bytes,2,opt,name=type"` + // VolumeSize is the size of the volume. + VolumeSize string `json:"size" protobuf:"bytes,3,opt,name=size"` + // Encrypted determines if the volume should be encrypted. + // +optional + Encrypted *bool `json:"encrypted,omitempty" protobuf:"varint,4,opt,name=encrypted"` +} + +// CRI contains information about the Container Runtimes. +type CRI struct { + // The name of the CRI library + Name CRIName `json:"name" protobuf:"bytes,1,opt,name=name"` + // ContainerRuntimes is the list of the required container runtimes supported for a worker pool. 
+ // +optional + ContainerRuntimes []ContainerRuntime `json:"containerRuntimes,omitempty" protobuf:"bytes,2,rep,name=containerRuntimes"` +} + +// CRIName is a type alias for the CRI name string. +type CRIName string + +const ( + CRINameContainerD CRIName = "containerd" +) + +// ContainerRuntime contains information about worker's available container runtime +type ContainerRuntime struct { + // Type is the type of the Container Runtime. + Type string `json:"type" protobuf:"bytes,1,opt,name=name"` + // ProviderConfig is the configuration passed to the ContainerRuntime resource. + // +optional + ProviderConfig *runtime.RawExtension `json:"providerConfig,omitempty" protobuf:"bytes,2,opt,name=providerConfig"` +} + +var ( + // DefaultWorkerMaxSurge is the default value for Worker MaxSurge. + DefaultWorkerMaxSurge = intstr.FromInt(1) + // DefaultWorkerMaxUnavailable is the default value for Worker MaxUnavailable. + DefaultWorkerMaxUnavailable = intstr.FromInt(0) + // DefaultWorkerSystemComponentsAllow is the default value for Worker AllowSystemComponents + DefaultWorkerSystemComponentsAllow = true +) + +////////////////////////////////////////////////////////////////////////////////////////////////// +// Other/miscellaneous constants and types // +////////////////////////////////////////////////////////////////////////////////////////////////// + +const ( + // ShootEventImageVersionMaintenance indicates that a maintenance operation regarding the image version has been performed. + ShootEventImageVersionMaintenance = "MachineImageVersionMaintenance" + // ShootEventK8sVersionMaintenance indicates that a maintenance operation regarding the K8s version has been performed. + ShootEventK8sVersionMaintenance = "KubernetesVersionMaintenance" + // ShootEventHibernationEnabled indicates that hibernation started. + ShootEventHibernationEnabled = "Hibernated" + // ShootEventHibernationDisabled indicates that hibernation ended. + ShootEventHibernationDisabled = "WokenUp" + // ShootEventSchedulingSuccessful indicates that a scheduling decision was taken successfully. + ShootEventSchedulingSuccessful = "SchedulingSuccessful" + // ShootEventSchedulingFailed indicates that a scheduling decision failed. + ShootEventSchedulingFailed = "SchedulingFailed" +) + +const ( + // ShootAPIServerAvailable is a constant for a condition type indicating that the Shoot cluster's API server is available. + ShootAPIServerAvailable ConditionType = "APIServerAvailable" + // ShootControlPlaneHealthy is a constant for a condition type indicating the control plane health. + ShootControlPlaneHealthy ConditionType = "ControlPlaneHealthy" + // ShootEveryNodeReady is a constant for a condition type indicating the node health. + ShootEveryNodeReady ConditionType = "EveryNodeReady" + // ShootSystemComponentsHealthy is a constant for a condition type indicating the system components health. + ShootSystemComponentsHealthy ConditionType = "SystemComponentsHealthy" + // ShootHibernationPossible is a constant for a condition type indicating whether the Shoot can be hibernated. + ShootHibernationPossible ConditionType = "HibernationPossible" + // ShootMaintenancePreconditionsSatisfied is a constant for a condition type indicating whether all preconditions + // for a shoot maintenance operation are satisfied. + ShootMaintenancePreconditionsSatisfied ConditionType = "MaintenancePreconditionsSatisfied" +) + +// ShootPurpose is a type alias for string. 
+type ShootPurpose string + +const ( + // ShootPurposeEvaluation is a constant for the evaluation purpose. + ShootPurposeEvaluation ShootPurpose = "evaluation" + // ShootPurposeTesting is a constant for the testing purpose. + ShootPurposeTesting ShootPurpose = "testing" + // ShootPurposeDevelopment is a constant for the development purpose. + ShootPurposeDevelopment ShootPurpose = "development" + // ShootPurposeProduction is a constant for the production purpose. + ShootPurposeProduction ShootPurpose = "production" + // ShootPurposeInfrastructure is a constant for the infrastructure purpose. + ShootPurposeInfrastructure ShootPurpose = "infrastructure" +) diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_shootstate.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_shootstate.go new file mode 100644 index 000000000..4586065ed --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_shootstate.go @@ -0,0 +1,100 @@ +// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + autoscalingv1 "k8s.io/api/autoscaling/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + + gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ShootState contains a snapshot of the Shoot's state required to migrate the Shoot's control plane to a new Seed. +type ShootState struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Specification of the ShootState. + // +optional + Spec ShootStateSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ShootStateList is a list of ShootState objects. +type ShootStateList struct { + metav1.TypeMeta `json:",inline"` + // Standard list object metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Items is the list of ShootStates. + Items []ShootState `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// ShootStateSpec is the specification of the ShootState. 
+type ShootStateSpec struct { + // Gardener holds the data required to generate resources deployed by the gardenlet + // +patchMergeKey=name + // +patchStrategy=merge + // +optional + Gardener []GardenerResourceData `json:"gardener,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,1,rep,name=gardener"` + // Extensions holds the state of custom resources reconciled by extension controllers in the seed + // +optional + Extensions []ExtensionResourceState `json:"extensions,omitempty" protobuf:"bytes,2,rep,name=extensions"` + // Resources holds the data of resources referred to by extension controller states + // +optional + Resources []ResourceData `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"` +} + +// GardenerResourceData holds the data which is used to generate resources, deployed in the Shoot's control plane. +type GardenerResourceData struct { + // Name of the object required to generate resources + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // Type of the object + Type string `json:"type" protobuf:"bytes,2,opt,name=type"` + // Data contains the payload required to generate resources + Data runtime.RawExtension `json:"data" protobuf:"bytes,3,opt,name=data"` +} + +// ExtensionResourceState contains the kind of the extension custom resource and its last observed state in the Shoot's +// namespace on the Seed cluster. +type ExtensionResourceState struct { + // Kind (type) of the extension custom resource + Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` + // Name of the extension custom resource + // +optional + Name *string `json:"name,omitempty" protobuf:"bytes,2,opt,name=name"` + // Purpose of the extension custom resource + // +optional + Purpose *string `json:"purpose,omitempty" protobuf:"bytes,3,opt,name=purpose"` + // State of the extension resource + // +optional + State *runtime.RawExtension `json:"state,omitempty" protobuf:"bytes,4,opt,name=state"` + // Resources holds a list of named resource references that can be referred to in the state by their names. + // +optional + Resources []gardencorev1beta1.NamedResourceReference `json:"resources,omitempty" protobuf:"bytes,5,rep,name=resources"` +} + +// ResourceData holds the data of a resource referred to by an extension controller state. +type ResourceData struct { + autoscalingv1.CrossVersionObjectReference `json:",inline" protobuf:"bytes,1,opt,name=ref"` + // Data of the resource + Data runtime.RawExtension `json:"data" protobuf:"bytes,2,opt,name=data"` +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_utils.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_utils.go new file mode 100644 index 000000000..a218d0b3e --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_utils.go @@ -0,0 +1,72 @@ +// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + // EventSchedulingSuccessful is an event reason for successful scheduling. + EventSchedulingSuccessful = "SchedulingSuccessful" + // EventSchedulingFailed is an event reason for failed scheduling. + EventSchedulingFailed = "SchedulingFailed" +) + +// ConditionStatus is the status of a condition. +type ConditionStatus string + +// ConditionType is a string alias. +type ConditionType string + +// Condition holds the information about the state of a resource. +type Condition struct { + // Type of the Shoot condition. + Type ConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=ConditionType"` + // Status of the condition, one of True, False, Unknown. + Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"` + // Last time the condition transitioned from one status to another. + LastTransitionTime metav1.Time `json:"lastTransitionTime" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // Last time the condition was updated. + LastUpdateTime metav1.Time `json:"lastUpdateTime" protobuf:"bytes,4,opt,name=lastUpdateTime"` + // The reason for the condition's last transition. + Reason string `json:"reason" protobuf:"bytes,5,opt,name=reason"` + // A human readable message indicating details about the transition. + Message string `json:"message" protobuf:"bytes,6,opt,name=message"` + // Well-defined error codes in case the condition reports a problem. + // +optional + Codes []ErrorCode `json:"codes,omitempty" protobuf:"bytes,7,rep,name=codes,casttype=ErrorCode"` +} + +const ( + // ConditionTrue means a resource is in the condition. + ConditionTrue ConditionStatus = "True" + // ConditionFalse means a resource is not in the condition. + ConditionFalse ConditionStatus = "False" + // ConditionUnknown means Gardener can't decide if a resource is in the condition or not. + ConditionUnknown ConditionStatus = "Unknown" + // ConditionProgressing means the condition was seen true, failed but stayed within a predefined failure threshold. + // In the future, we could add other intermediate conditions, e.g. ConditionDegraded. + ConditionProgressing ConditionStatus = "Progressing" + + // ConditionCheckError is a constant for a reason in condition. + ConditionCheckError = "ConditionCheckError" + // ManagedResourceMissingConditionError is a constant for a reason in a condition that indicates + // one or multiple missing conditions in the observed managed resource. + ManagedResourceMissingConditionError = "MissingManagedResourceCondition" + // OutdatedStatusError is a constant for a reason in a condition that indicates + // that the observed generation in a status is outdated. + OutdatedStatusError = "OutdatedStatus" +) diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/zz_generated.conversion.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/zz_generated.conversion.go new file mode 100644 index 000000000..6169e1309 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/zz_generated.conversion.go @@ -0,0 +1,5035 @@ +// +build !ignore_autogenerated + +/* +Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 
2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by conversion-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + unsafe "unsafe" + + core "github.com/gardener/gardener/pkg/apis/core" + v1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1" + v1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + resource "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + types "k8s.io/apimachinery/pkg/types" + intstr "k8s.io/apimachinery/pkg/util/intstr" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. +func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*Addon)(nil), (*core.Addon)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_Addon_To_core_Addon(a.(*Addon), b.(*core.Addon), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.Addon)(nil), (*Addon)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_Addon_To_v1alpha1_Addon(a.(*core.Addon), b.(*Addon), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*Addons)(nil), (*core.Addons)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_Addons_To_core_Addons(a.(*Addons), b.(*core.Addons), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.Addons)(nil), (*Addons)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_Addons_To_v1alpha1_Addons(a.(*core.Addons), b.(*Addons), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*AdmissionPlugin)(nil), (*core.AdmissionPlugin)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_AdmissionPlugin_To_core_AdmissionPlugin(a.(*AdmissionPlugin), b.(*core.AdmissionPlugin), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.AdmissionPlugin)(nil), (*AdmissionPlugin)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_AdmissionPlugin_To_v1alpha1_AdmissionPlugin(a.(*core.AdmissionPlugin), b.(*AdmissionPlugin), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*Alerting)(nil), (*core.Alerting)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_Alerting_To_core_Alerting(a.(*Alerting), b.(*core.Alerting), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.Alerting)(nil), (*Alerting)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_Alerting_To_v1alpha1_Alerting(a.(*core.Alerting), b.(*Alerting), scope) + }); err != nil { + return err + } + if 
err := s.AddGeneratedConversionFunc((*AuditConfig)(nil), (*core.AuditConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_AuditConfig_To_core_AuditConfig(a.(*AuditConfig), b.(*core.AuditConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.AuditConfig)(nil), (*AuditConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_AuditConfig_To_v1alpha1_AuditConfig(a.(*core.AuditConfig), b.(*AuditConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*AuditPolicy)(nil), (*core.AuditPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_AuditPolicy_To_core_AuditPolicy(a.(*AuditPolicy), b.(*core.AuditPolicy), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.AuditPolicy)(nil), (*AuditPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_AuditPolicy_To_v1alpha1_AuditPolicy(a.(*core.AuditPolicy), b.(*AuditPolicy), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*AvailabilityZone)(nil), (*core.AvailabilityZone)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_AvailabilityZone_To_core_AvailabilityZone(a.(*AvailabilityZone), b.(*core.AvailabilityZone), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.AvailabilityZone)(nil), (*AvailabilityZone)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_AvailabilityZone_To_v1alpha1_AvailabilityZone(a.(*core.AvailabilityZone), b.(*AvailabilityZone), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*BackupBucketList)(nil), (*core.BackupBucketList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_BackupBucketList_To_core_BackupBucketList(a.(*BackupBucketList), b.(*core.BackupBucketList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.BackupBucketList)(nil), (*BackupBucketList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_BackupBucketList_To_v1alpha1_BackupBucketList(a.(*core.BackupBucketList), b.(*BackupBucketList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*BackupBucketProvider)(nil), (*core.BackupBucketProvider)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_BackupBucketProvider_To_core_BackupBucketProvider(a.(*BackupBucketProvider), b.(*core.BackupBucketProvider), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.BackupBucketProvider)(nil), (*BackupBucketProvider)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_BackupBucketProvider_To_v1alpha1_BackupBucketProvider(a.(*core.BackupBucketProvider), b.(*BackupBucketProvider), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*BackupBucketStatus)(nil), (*core.BackupBucketStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_BackupBucketStatus_To_core_BackupBucketStatus(a.(*BackupBucketStatus), b.(*core.BackupBucketStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.BackupBucketStatus)(nil), (*BackupBucketStatus)(nil), func(a, b interface{}, scope conversion.Scope) error 
{ + return Convert_core_BackupBucketStatus_To_v1alpha1_BackupBucketStatus(a.(*core.BackupBucketStatus), b.(*BackupBucketStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*BackupEntryList)(nil), (*core.BackupEntryList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_BackupEntryList_To_core_BackupEntryList(a.(*BackupEntryList), b.(*core.BackupEntryList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.BackupEntryList)(nil), (*BackupEntryList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_BackupEntryList_To_v1alpha1_BackupEntryList(a.(*core.BackupEntryList), b.(*BackupEntryList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*BackupEntryStatus)(nil), (*core.BackupEntryStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_BackupEntryStatus_To_core_BackupEntryStatus(a.(*BackupEntryStatus), b.(*core.BackupEntryStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.BackupEntryStatus)(nil), (*BackupEntryStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_BackupEntryStatus_To_v1alpha1_BackupEntryStatus(a.(*core.BackupEntryStatus), b.(*BackupEntryStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*CRI)(nil), (*core.CRI)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_CRI_To_core_CRI(a.(*CRI), b.(*core.CRI), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.CRI)(nil), (*CRI)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_CRI_To_v1alpha1_CRI(a.(*core.CRI), b.(*CRI), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*CloudInfo)(nil), (*core.CloudInfo)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_CloudInfo_To_core_CloudInfo(a.(*CloudInfo), b.(*core.CloudInfo), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.CloudInfo)(nil), (*CloudInfo)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_CloudInfo_To_v1alpha1_CloudInfo(a.(*core.CloudInfo), b.(*CloudInfo), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*CloudProfile)(nil), (*core.CloudProfile)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_CloudProfile_To_core_CloudProfile(a.(*CloudProfile), b.(*core.CloudProfile), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.CloudProfile)(nil), (*CloudProfile)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_CloudProfile_To_v1alpha1_CloudProfile(a.(*core.CloudProfile), b.(*CloudProfile), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*CloudProfileList)(nil), (*core.CloudProfileList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_CloudProfileList_To_core_CloudProfileList(a.(*CloudProfileList), b.(*core.CloudProfileList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.CloudProfileList)(nil), (*CloudProfileList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_core_CloudProfileList_To_v1alpha1_CloudProfileList(a.(*core.CloudProfileList), b.(*CloudProfileList), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*CloudProfileSpec)(nil), (*core.CloudProfileSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_CloudProfileSpec_To_core_CloudProfileSpec(a.(*CloudProfileSpec), b.(*core.CloudProfileSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.CloudProfileSpec)(nil), (*CloudProfileSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_CloudProfileSpec_To_v1alpha1_CloudProfileSpec(a.(*core.CloudProfileSpec), b.(*CloudProfileSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*ClusterAutoscaler)(nil), (*core.ClusterAutoscaler)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_ClusterAutoscaler_To_core_ClusterAutoscaler(a.(*ClusterAutoscaler), b.(*core.ClusterAutoscaler), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.ClusterAutoscaler)(nil), (*ClusterAutoscaler)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_ClusterAutoscaler_To_v1alpha1_ClusterAutoscaler(a.(*core.ClusterAutoscaler), b.(*ClusterAutoscaler), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*ClusterInfo)(nil), (*core.ClusterInfo)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_ClusterInfo_To_core_ClusterInfo(a.(*ClusterInfo), b.(*core.ClusterInfo), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.ClusterInfo)(nil), (*ClusterInfo)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_ClusterInfo_To_v1alpha1_ClusterInfo(a.(*core.ClusterInfo), b.(*ClusterInfo), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*Condition)(nil), (*core.Condition)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_Condition_To_core_Condition(a.(*Condition), b.(*core.Condition), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.Condition)(nil), (*Condition)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_Condition_To_v1alpha1_Condition(a.(*core.Condition), b.(*Condition), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*ContainerRuntime)(nil), (*core.ContainerRuntime)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_ContainerRuntime_To_core_ContainerRuntime(a.(*ContainerRuntime), b.(*core.ContainerRuntime), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.ContainerRuntime)(nil), (*ContainerRuntime)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_ContainerRuntime_To_v1alpha1_ContainerRuntime(a.(*core.ContainerRuntime), b.(*ContainerRuntime), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*ControllerDeployment)(nil), (*core.ControllerDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_ControllerDeployment_To_core_ControllerDeployment(a.(*ControllerDeployment), b.(*core.ControllerDeployment), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.ControllerDeployment)(nil), (*ControllerDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_ControllerDeployment_To_v1alpha1_ControllerDeployment(a.(*core.ControllerDeployment), b.(*ControllerDeployment), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*ControllerInstallation)(nil), (*core.ControllerInstallation)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_ControllerInstallation_To_core_ControllerInstallation(a.(*ControllerInstallation), b.(*core.ControllerInstallation), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.ControllerInstallation)(nil), (*ControllerInstallation)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_ControllerInstallation_To_v1alpha1_ControllerInstallation(a.(*core.ControllerInstallation), b.(*ControllerInstallation), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*ControllerInstallationList)(nil), (*core.ControllerInstallationList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_ControllerInstallationList_To_core_ControllerInstallationList(a.(*ControllerInstallationList), b.(*core.ControllerInstallationList), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.ControllerInstallationList)(nil), (*ControllerInstallationList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_ControllerInstallationList_To_v1alpha1_ControllerInstallationList(a.(*core.ControllerInstallationList), b.(*ControllerInstallationList), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*ControllerInstallationSpec)(nil), (*core.ControllerInstallationSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_ControllerInstallationSpec_To_core_ControllerInstallationSpec(a.(*ControllerInstallationSpec), b.(*core.ControllerInstallationSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.ControllerInstallationSpec)(nil), (*ControllerInstallationSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_ControllerInstallationSpec_To_v1alpha1_ControllerInstallationSpec(a.(*core.ControllerInstallationSpec), b.(*ControllerInstallationSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*ControllerInstallationStatus)(nil), (*core.ControllerInstallationStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_ControllerInstallationStatus_To_core_ControllerInstallationStatus(a.(*ControllerInstallationStatus), b.(*core.ControllerInstallationStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.ControllerInstallationStatus)(nil), (*ControllerInstallationStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_ControllerInstallationStatus_To_v1alpha1_ControllerInstallationStatus(a.(*core.ControllerInstallationStatus), b.(*ControllerInstallationStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*ControllerRegistration)(nil), (*core.ControllerRegistration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_ControllerRegistration_To_core_ControllerRegistration(a.(*ControllerRegistration), b.(*core.ControllerRegistration), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.ControllerRegistration)(nil), (*ControllerRegistration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_ControllerRegistration_To_v1alpha1_ControllerRegistration(a.(*core.ControllerRegistration), b.(*ControllerRegistration), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*ControllerRegistrationList)(nil), (*core.ControllerRegistrationList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_ControllerRegistrationList_To_core_ControllerRegistrationList(a.(*ControllerRegistrationList), b.(*core.ControllerRegistrationList), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.ControllerRegistrationList)(nil), (*ControllerRegistrationList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_ControllerRegistrationList_To_v1alpha1_ControllerRegistrationList(a.(*core.ControllerRegistrationList), b.(*ControllerRegistrationList), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*ControllerRegistrationSpec)(nil), (*core.ControllerRegistrationSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_ControllerRegistrationSpec_To_core_ControllerRegistrationSpec(a.(*ControllerRegistrationSpec), b.(*core.ControllerRegistrationSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.ControllerRegistrationSpec)(nil), (*ControllerRegistrationSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_ControllerRegistrationSpec_To_v1alpha1_ControllerRegistrationSpec(a.(*core.ControllerRegistrationSpec), b.(*ControllerRegistrationSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*ControllerResource)(nil), (*core.ControllerResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_ControllerResource_To_core_ControllerResource(a.(*ControllerResource), b.(*core.ControllerResource), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.ControllerResource)(nil), (*ControllerResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_ControllerResource_To_v1alpha1_ControllerResource(a.(*core.ControllerResource), b.(*ControllerResource), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*DNS)(nil), (*core.DNS)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_DNS_To_core_DNS(a.(*DNS), b.(*core.DNS), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.DNS)(nil), (*DNS)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_DNS_To_v1alpha1_DNS(a.(*core.DNS), b.(*DNS), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*DNSIncludeExclude)(nil), (*core.DNSIncludeExclude)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_DNSIncludeExclude_To_core_DNSIncludeExclude(a.(*DNSIncludeExclude), b.(*core.DNSIncludeExclude), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.DNSIncludeExclude)(nil), (*DNSIncludeExclude)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_DNSIncludeExclude_To_v1alpha1_DNSIncludeExclude(a.(*core.DNSIncludeExclude), b.(*DNSIncludeExclude), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*DNSProvider)(nil), (*core.DNSProvider)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_DNSProvider_To_core_DNSProvider(a.(*DNSProvider), b.(*core.DNSProvider), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.DNSProvider)(nil), (*DNSProvider)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_DNSProvider_To_v1alpha1_DNSProvider(a.(*core.DNSProvider), b.(*DNSProvider), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*DataVolume)(nil), (*core.DataVolume)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_DataVolume_To_core_DataVolume(a.(*DataVolume), b.(*core.DataVolume), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.DataVolume)(nil), (*DataVolume)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_DataVolume_To_v1alpha1_DataVolume(a.(*core.DataVolume), b.(*DataVolume), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*Endpoint)(nil), (*core.Endpoint)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_Endpoint_To_core_Endpoint(a.(*Endpoint), b.(*core.Endpoint), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.Endpoint)(nil), (*Endpoint)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_Endpoint_To_v1alpha1_Endpoint(a.(*core.Endpoint), b.(*Endpoint), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*ExpirableVersion)(nil), (*core.ExpirableVersion)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_ExpirableVersion_To_core_ExpirableVersion(a.(*ExpirableVersion), b.(*core.ExpirableVersion), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.ExpirableVersion)(nil), (*ExpirableVersion)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_ExpirableVersion_To_v1alpha1_ExpirableVersion(a.(*core.ExpirableVersion), b.(*ExpirableVersion), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*Extension)(nil), (*core.Extension)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_Extension_To_core_Extension(a.(*Extension), b.(*core.Extension), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.Extension)(nil), (*Extension)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_Extension_To_v1alpha1_Extension(a.(*core.Extension), b.(*Extension), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*ExtensionResourceState)(nil), (*core.ExtensionResourceState)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_ExtensionResourceState_To_core_ExtensionResourceState(a.(*ExtensionResourceState), b.(*core.ExtensionResourceState), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.ExtensionResourceState)(nil), (*ExtensionResourceState)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_ExtensionResourceState_To_v1alpha1_ExtensionResourceState(a.(*core.ExtensionResourceState), b.(*ExtensionResourceState), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*Gardener)(nil), (*core.Gardener)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_Gardener_To_core_Gardener(a.(*Gardener), b.(*core.Gardener), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.Gardener)(nil), (*Gardener)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_Gardener_To_v1alpha1_Gardener(a.(*core.Gardener), b.(*Gardener), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*GardenerResourceData)(nil), (*core.GardenerResourceData)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_GardenerResourceData_To_core_GardenerResourceData(a.(*GardenerResourceData), b.(*core.GardenerResourceData), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.GardenerResourceData)(nil), (*GardenerResourceData)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_GardenerResourceData_To_v1alpha1_GardenerResourceData(a.(*core.GardenerResourceData), b.(*GardenerResourceData), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*Hibernation)(nil), (*core.Hibernation)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_Hibernation_To_core_Hibernation(a.(*Hibernation), b.(*core.Hibernation), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.Hibernation)(nil), (*Hibernation)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_Hibernation_To_v1alpha1_Hibernation(a.(*core.Hibernation), b.(*Hibernation), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*HibernationSchedule)(nil), (*core.HibernationSchedule)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_HibernationSchedule_To_core_HibernationSchedule(a.(*HibernationSchedule), b.(*core.HibernationSchedule), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.HibernationSchedule)(nil), (*HibernationSchedule)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_HibernationSchedule_To_v1alpha1_HibernationSchedule(a.(*core.HibernationSchedule), b.(*HibernationSchedule), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*HorizontalPodAutoscalerConfig)(nil), (*core.HorizontalPodAutoscalerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_HorizontalPodAutoscalerConfig_To_core_HorizontalPodAutoscalerConfig(a.(*HorizontalPodAutoscalerConfig), b.(*core.HorizontalPodAutoscalerConfig), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.HorizontalPodAutoscalerConfig)(nil), (*HorizontalPodAutoscalerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_HorizontalPodAutoscalerConfig_To_v1alpha1_HorizontalPodAutoscalerConfig(a.(*core.HorizontalPodAutoscalerConfig), b.(*HorizontalPodAutoscalerConfig), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*Ingress)(nil), (*core.Ingress)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_Ingress_To_core_Ingress(a.(*Ingress), b.(*core.Ingress), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.Ingress)(nil), (*Ingress)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_Ingress_To_v1alpha1_Ingress(a.(*core.Ingress), b.(*Ingress), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*IngressController)(nil), (*core.IngressController)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_IngressController_To_core_IngressController(a.(*IngressController), b.(*core.IngressController), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.IngressController)(nil), (*IngressController)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_IngressController_To_v1alpha1_IngressController(a.(*core.IngressController), b.(*IngressController), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*KubeAPIServerConfig)(nil), (*core.KubeAPIServerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_KubeAPIServerConfig_To_core_KubeAPIServerConfig(a.(*KubeAPIServerConfig), b.(*core.KubeAPIServerConfig), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.KubeAPIServerConfig)(nil), (*KubeAPIServerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_KubeAPIServerConfig_To_v1alpha1_KubeAPIServerConfig(a.(*core.KubeAPIServerConfig), b.(*KubeAPIServerConfig), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*KubeAPIServerRequests)(nil), (*core.KubeAPIServerRequests)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_KubeAPIServerRequests_To_core_KubeAPIServerRequests(a.(*KubeAPIServerRequests), b.(*core.KubeAPIServerRequests), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.KubeAPIServerRequests)(nil), (*KubeAPIServerRequests)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_KubeAPIServerRequests_To_v1alpha1_KubeAPIServerRequests(a.(*core.KubeAPIServerRequests), b.(*KubeAPIServerRequests), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*KubeControllerManagerConfig)(nil), (*core.KubeControllerManagerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_KubeControllerManagerConfig_To_core_KubeControllerManagerConfig(a.(*KubeControllerManagerConfig), b.(*core.KubeControllerManagerConfig), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.KubeControllerManagerConfig)(nil), (*KubeControllerManagerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_KubeControllerManagerConfig_To_v1alpha1_KubeControllerManagerConfig(a.(*core.KubeControllerManagerConfig), b.(*KubeControllerManagerConfig), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*KubeProxyConfig)(nil), (*core.KubeProxyConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_KubeProxyConfig_To_core_KubeProxyConfig(a.(*KubeProxyConfig), b.(*core.KubeProxyConfig), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.KubeProxyConfig)(nil), (*KubeProxyConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_KubeProxyConfig_To_v1alpha1_KubeProxyConfig(a.(*core.KubeProxyConfig), b.(*KubeProxyConfig), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*KubeSchedulerConfig)(nil), (*core.KubeSchedulerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_KubeSchedulerConfig_To_core_KubeSchedulerConfig(a.(*KubeSchedulerConfig), b.(*core.KubeSchedulerConfig), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.KubeSchedulerConfig)(nil), (*KubeSchedulerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_KubeSchedulerConfig_To_v1alpha1_KubeSchedulerConfig(a.(*core.KubeSchedulerConfig), b.(*KubeSchedulerConfig), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*KubeletConfig)(nil), (*core.KubeletConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_KubeletConfig_To_core_KubeletConfig(a.(*KubeletConfig), b.(*core.KubeletConfig), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.KubeletConfig)(nil), (*KubeletConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_KubeletConfig_To_v1alpha1_KubeletConfig(a.(*core.KubeletConfig), b.(*KubeletConfig), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*KubeletConfigEviction)(nil), (*core.KubeletConfigEviction)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_KubeletConfigEviction_To_core_KubeletConfigEviction(a.(*KubeletConfigEviction), b.(*core.KubeletConfigEviction), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.KubeletConfigEviction)(nil), (*KubeletConfigEviction)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_KubeletConfigEviction_To_v1alpha1_KubeletConfigEviction(a.(*core.KubeletConfigEviction), b.(*KubeletConfigEviction), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*KubeletConfigEvictionMinimumReclaim)(nil), (*core.KubeletConfigEvictionMinimumReclaim)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_KubeletConfigEvictionMinimumReclaim_To_core_KubeletConfigEvictionMinimumReclaim(a.(*KubeletConfigEvictionMinimumReclaim), b.(*core.KubeletConfigEvictionMinimumReclaim), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.KubeletConfigEvictionMinimumReclaim)(nil), (*KubeletConfigEvictionMinimumReclaim)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_KubeletConfigEvictionMinimumReclaim_To_v1alpha1_KubeletConfigEvictionMinimumReclaim(a.(*core.KubeletConfigEvictionMinimumReclaim), b.(*KubeletConfigEvictionMinimumReclaim), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*KubeletConfigEvictionSoftGracePeriod)(nil), (*core.KubeletConfigEvictionSoftGracePeriod)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_KubeletConfigEvictionSoftGracePeriod_To_core_KubeletConfigEvictionSoftGracePeriod(a.(*KubeletConfigEvictionSoftGracePeriod), b.(*core.KubeletConfigEvictionSoftGracePeriod), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.KubeletConfigEvictionSoftGracePeriod)(nil), (*KubeletConfigEvictionSoftGracePeriod)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_KubeletConfigEvictionSoftGracePeriod_To_v1alpha1_KubeletConfigEvictionSoftGracePeriod(a.(*core.KubeletConfigEvictionSoftGracePeriod), b.(*KubeletConfigEvictionSoftGracePeriod), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*KubeletConfigReserved)(nil), (*core.KubeletConfigReserved)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_KubeletConfigReserved_To_core_KubeletConfigReserved(a.(*KubeletConfigReserved), b.(*core.KubeletConfigReserved), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.KubeletConfigReserved)(nil), (*KubeletConfigReserved)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_KubeletConfigReserved_To_v1alpha1_KubeletConfigReserved(a.(*core.KubeletConfigReserved), b.(*KubeletConfigReserved), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*Kubernetes)(nil), (*core.Kubernetes)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_Kubernetes_To_core_Kubernetes(a.(*Kubernetes), b.(*core.Kubernetes), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.Kubernetes)(nil), (*Kubernetes)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_Kubernetes_To_v1alpha1_Kubernetes(a.(*core.Kubernetes), b.(*Kubernetes), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*KubernetesConfig)(nil), (*core.KubernetesConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_KubernetesConfig_To_core_KubernetesConfig(a.(*KubernetesConfig), b.(*core.KubernetesConfig), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.KubernetesConfig)(nil), (*KubernetesConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_KubernetesConfig_To_v1alpha1_KubernetesConfig(a.(*core.KubernetesConfig), b.(*KubernetesConfig), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*KubernetesDashboard)(nil), (*core.KubernetesDashboard)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_KubernetesDashboard_To_core_KubernetesDashboard(a.(*KubernetesDashboard), b.(*core.KubernetesDashboard), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.KubernetesDashboard)(nil), (*KubernetesDashboard)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_KubernetesDashboard_To_v1alpha1_KubernetesDashboard(a.(*core.KubernetesDashboard), b.(*KubernetesDashboard), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*KubernetesInfo)(nil), (*core.KubernetesInfo)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_KubernetesInfo_To_core_KubernetesInfo(a.(*KubernetesInfo), b.(*core.KubernetesInfo), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.KubernetesInfo)(nil), (*KubernetesInfo)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_KubernetesInfo_To_v1alpha1_KubernetesInfo(a.(*core.KubernetesInfo), b.(*KubernetesInfo), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*KubernetesSettings)(nil), (*core.KubernetesSettings)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_KubernetesSettings_To_core_KubernetesSettings(a.(*KubernetesSettings), b.(*core.KubernetesSettings), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.KubernetesSettings)(nil), (*KubernetesSettings)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_KubernetesSettings_To_v1alpha1_KubernetesSettings(a.(*core.KubernetesSettings), b.(*KubernetesSettings), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*LastError)(nil), (*core.LastError)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_LastError_To_core_LastError(a.(*LastError), b.(*core.LastError), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.LastError)(nil), (*LastError)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_LastError_To_v1alpha1_LastError(a.(*core.LastError), b.(*LastError), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*LastOperation)(nil), (*core.LastOperation)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_LastOperation_To_core_LastOperation(a.(*LastOperation), b.(*core.LastOperation), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.LastOperation)(nil), (*LastOperation)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_LastOperation_To_v1alpha1_LastOperation(a.(*core.LastOperation), b.(*LastOperation), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*Machine)(nil), (*core.Machine)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_Machine_To_core_Machine(a.(*Machine), b.(*core.Machine), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.Machine)(nil), (*Machine)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_Machine_To_v1alpha1_Machine(a.(*core.Machine), b.(*Machine), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*MachineControllerManagerSettings)(nil), (*core.MachineControllerManagerSettings)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_MachineControllerManagerSettings_To_core_MachineControllerManagerSettings(a.(*MachineControllerManagerSettings), b.(*core.MachineControllerManagerSettings), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.MachineControllerManagerSettings)(nil), (*MachineControllerManagerSettings)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_MachineControllerManagerSettings_To_v1alpha1_MachineControllerManagerSettings(a.(*core.MachineControllerManagerSettings), b.(*MachineControllerManagerSettings), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*MachineImage)(nil), (*core.MachineImage)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_MachineImage_To_core_MachineImage(a.(*MachineImage), b.(*core.MachineImage), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.MachineImage)(nil), (*MachineImage)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_MachineImage_To_v1alpha1_MachineImage(a.(*core.MachineImage), b.(*MachineImage), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*MachineImageVersion)(nil), (*core.MachineImageVersion)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_MachineImageVersion_To_core_MachineImageVersion(a.(*MachineImageVersion), b.(*core.MachineImageVersion), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.MachineImageVersion)(nil), (*MachineImageVersion)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_MachineImageVersion_To_v1alpha1_MachineImageVersion(a.(*core.MachineImageVersion), b.(*MachineImageVersion), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*MachineType)(nil), (*core.MachineType)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_MachineType_To_core_MachineType(a.(*MachineType), b.(*core.MachineType), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.MachineType)(nil), (*MachineType)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_MachineType_To_v1alpha1_MachineType(a.(*core.MachineType), b.(*MachineType), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*MachineTypeStorage)(nil), (*core.MachineTypeStorage)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_MachineTypeStorage_To_core_MachineTypeStorage(a.(*MachineTypeStorage), b.(*core.MachineTypeStorage), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.MachineTypeStorage)(nil), (*MachineTypeStorage)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_MachineTypeStorage_To_v1alpha1_MachineTypeStorage(a.(*core.MachineTypeStorage), b.(*MachineTypeStorage), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*Maintenance)(nil), (*core.Maintenance)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_Maintenance_To_core_Maintenance(a.(*Maintenance), b.(*core.Maintenance), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.Maintenance)(nil), (*Maintenance)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_Maintenance_To_v1alpha1_Maintenance(a.(*core.Maintenance), b.(*Maintenance), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*MaintenanceAutoUpdate)(nil), (*core.MaintenanceAutoUpdate)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_MaintenanceAutoUpdate_To_core_MaintenanceAutoUpdate(a.(*MaintenanceAutoUpdate), b.(*core.MaintenanceAutoUpdate), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.MaintenanceAutoUpdate)(nil), (*MaintenanceAutoUpdate)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_MaintenanceAutoUpdate_To_v1alpha1_MaintenanceAutoUpdate(a.(*core.MaintenanceAutoUpdate), b.(*MaintenanceAutoUpdate), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*MaintenanceTimeWindow)(nil), (*core.MaintenanceTimeWindow)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_MaintenanceTimeWindow_To_core_MaintenanceTimeWindow(a.(*MaintenanceTimeWindow), b.(*core.MaintenanceTimeWindow), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.MaintenanceTimeWindow)(nil), (*MaintenanceTimeWindow)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_MaintenanceTimeWindow_To_v1alpha1_MaintenanceTimeWindow(a.(*core.MaintenanceTimeWindow), b.(*MaintenanceTimeWindow), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*Monitoring)(nil), (*core.Monitoring)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_Monitoring_To_core_Monitoring(a.(*Monitoring), b.(*core.Monitoring), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.Monitoring)(nil), (*Monitoring)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_Monitoring_To_v1alpha1_Monitoring(a.(*core.Monitoring), b.(*Monitoring), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*NamedResourceReference)(nil), (*core.NamedResourceReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_NamedResourceReference_To_core_NamedResourceReference(a.(*NamedResourceReference), b.(*core.NamedResourceReference), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.NamedResourceReference)(nil), (*NamedResourceReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_NamedResourceReference_To_v1alpha1_NamedResourceReference(a.(*core.NamedResourceReference), b.(*NamedResourceReference), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*Networking)(nil), (*core.Networking)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_Networking_To_core_Networking(a.(*Networking), b.(*core.Networking), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.Networking)(nil), (*Networking)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_Networking_To_v1alpha1_Networking(a.(*core.Networking), b.(*Networking), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*NginxIngress)(nil), (*core.NginxIngress)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_NginxIngress_To_core_NginxIngress(a.(*NginxIngress), b.(*core.NginxIngress), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.NginxIngress)(nil), (*NginxIngress)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_NginxIngress_To_v1alpha1_NginxIngress(a.(*core.NginxIngress), b.(*NginxIngress), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*OIDCConfig)(nil), (*core.OIDCConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_OIDCConfig_To_core_OIDCConfig(a.(*OIDCConfig), b.(*core.OIDCConfig), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.OIDCConfig)(nil), (*OIDCConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_OIDCConfig_To_v1alpha1_OIDCConfig(a.(*core.OIDCConfig), b.(*OIDCConfig), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*OpenIDConnectClientAuthentication)(nil), (*core.OpenIDConnectClientAuthentication)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_OpenIDConnectClientAuthentication_To_core_OpenIDConnectClientAuthentication(a.(*OpenIDConnectClientAuthentication), b.(*core.OpenIDConnectClientAuthentication), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.OpenIDConnectClientAuthentication)(nil), (*OpenIDConnectClientAuthentication)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_OpenIDConnectClientAuthentication_To_v1alpha1_OpenIDConnectClientAuthentication(a.(*core.OpenIDConnectClientAuthentication), b.(*OpenIDConnectClientAuthentication), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*Plant)(nil), (*core.Plant)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_Plant_To_core_Plant(a.(*Plant), b.(*core.Plant), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.Plant)(nil), (*Plant)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_Plant_To_v1alpha1_Plant(a.(*core.Plant), b.(*Plant), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*PlantList)(nil), (*core.PlantList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_PlantList_To_core_PlantList(a.(*PlantList), b.(*core.PlantList), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.PlantList)(nil), (*PlantList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_PlantList_To_v1alpha1_PlantList(a.(*core.PlantList), b.(*PlantList), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*PlantSpec)(nil), (*core.PlantSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_PlantSpec_To_core_PlantSpec(a.(*PlantSpec), b.(*core.PlantSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.PlantSpec)(nil), (*PlantSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_PlantSpec_To_v1alpha1_PlantSpec(a.(*core.PlantSpec), b.(*PlantSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*PlantStatus)(nil), (*core.PlantStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_PlantStatus_To_core_PlantStatus(a.(*PlantStatus), b.(*core.PlantStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.PlantStatus)(nil), (*PlantStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_PlantStatus_To_v1alpha1_PlantStatus(a.(*core.PlantStatus), b.(*PlantStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*Project)(nil), (*core.Project)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_Project_To_core_Project(a.(*Project), b.(*core.Project), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.Project)(nil), (*Project)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_Project_To_v1alpha1_Project(a.(*core.Project), b.(*Project), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*ProjectList)(nil), (*core.ProjectList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_ProjectList_To_core_ProjectList(a.(*ProjectList), b.(*core.ProjectList), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.ProjectList)(nil), (*ProjectList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_ProjectList_To_v1alpha1_ProjectList(a.(*core.ProjectList), b.(*ProjectList), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*ProjectStatus)(nil), (*core.ProjectStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_ProjectStatus_To_core_ProjectStatus(a.(*ProjectStatus), b.(*core.ProjectStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.ProjectStatus)(nil), (*ProjectStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_ProjectStatus_To_v1alpha1_ProjectStatus(a.(*core.ProjectStatus), b.(*ProjectStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*ProjectTolerations)(nil), (*core.ProjectTolerations)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_ProjectTolerations_To_core_ProjectTolerations(a.(*ProjectTolerations), b.(*core.ProjectTolerations), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.ProjectTolerations)(nil), (*ProjectTolerations)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_ProjectTolerations_To_v1alpha1_ProjectTolerations(a.(*core.ProjectTolerations), b.(*ProjectTolerations), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*Provider)(nil), (*core.Provider)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_Provider_To_core_Provider(a.(*Provider), b.(*core.Provider), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.Provider)(nil), (*Provider)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_Provider_To_v1alpha1_Provider(a.(*core.Provider), b.(*Provider), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*Quota)(nil), (*core.Quota)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_Quota_To_core_Quota(a.(*Quota), b.(*core.Quota), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.Quota)(nil), (*Quota)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_Quota_To_v1alpha1_Quota(a.(*core.Quota), b.(*Quota), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*QuotaList)(nil), (*core.QuotaList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_QuotaList_To_core_QuotaList(a.(*QuotaList), b.(*core.QuotaList), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.QuotaList)(nil), (*QuotaList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_QuotaList_To_v1alpha1_QuotaList(a.(*core.QuotaList), b.(*QuotaList), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*QuotaSpec)(nil), (*core.QuotaSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_QuotaSpec_To_core_QuotaSpec(a.(*QuotaSpec), b.(*core.QuotaSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.QuotaSpec)(nil), (*QuotaSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_QuotaSpec_To_v1alpha1_QuotaSpec(a.(*core.QuotaSpec), b.(*QuotaSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*Region)(nil), (*core.Region)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_Region_To_core_Region(a.(*Region), b.(*core.Region), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.Region)(nil), (*Region)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_Region_To_v1alpha1_Region(a.(*core.Region), b.(*Region), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*ResourceData)(nil), (*core.ResourceData)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_ResourceData_To_core_ResourceData(a.(*ResourceData), b.(*core.ResourceData), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.ResourceData)(nil), (*ResourceData)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_ResourceData_To_v1alpha1_ResourceData(a.(*core.ResourceData), b.(*ResourceData), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*ResourceWatchCacheSize)(nil), (*core.ResourceWatchCacheSize)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_ResourceWatchCacheSize_To_core_ResourceWatchCacheSize(a.(*ResourceWatchCacheSize), b.(*core.ResourceWatchCacheSize), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.ResourceWatchCacheSize)(nil), (*ResourceWatchCacheSize)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_ResourceWatchCacheSize_To_v1alpha1_ResourceWatchCacheSize(a.(*core.ResourceWatchCacheSize), b.(*ResourceWatchCacheSize), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*SecretBinding)(nil), (*core.SecretBinding)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_SecretBinding_To_core_SecretBinding(a.(*SecretBinding), b.(*core.SecretBinding), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.SecretBinding)(nil), (*SecretBinding)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_SecretBinding_To_v1alpha1_SecretBinding(a.(*core.SecretBinding), b.(*SecretBinding), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*SecretBindingList)(nil), (*core.SecretBindingList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_SecretBindingList_To_core_SecretBindingList(a.(*SecretBindingList), b.(*core.SecretBindingList), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.SecretBindingList)(nil), (*SecretBindingList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_SecretBindingList_To_v1alpha1_SecretBindingList(a.(*core.SecretBindingList), b.(*SecretBindingList), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*SeedBackup)(nil), (*core.SeedBackup)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_SeedBackup_To_core_SeedBackup(a.(*SeedBackup), b.(*core.SeedBackup), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.SeedBackup)(nil), (*SeedBackup)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_SeedBackup_To_v1alpha1_SeedBackup(a.(*core.SeedBackup), b.(*SeedBackup), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*SeedDNS)(nil), (*core.SeedDNS)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_SeedDNS_To_core_SeedDNS(a.(*SeedDNS), b.(*core.SeedDNS), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.SeedDNS)(nil), (*SeedDNS)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_SeedDNS_To_v1alpha1_SeedDNS(a.(*core.SeedDNS), b.(*SeedDNS), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*SeedDNSProvider)(nil), (*core.SeedDNSProvider)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_SeedDNSProvider_To_core_SeedDNSProvider(a.(*SeedDNSProvider), b.(*core.SeedDNSProvider), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.SeedDNSProvider)(nil), (*SeedDNSProvider)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_SeedDNSProvider_To_v1alpha1_SeedDNSProvider(a.(*core.SeedDNSProvider), b.(*SeedDNSProvider), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*SeedList)(nil), (*core.SeedList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_SeedList_To_core_SeedList(a.(*SeedList), b.(*core.SeedList), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.SeedList)(nil), (*SeedList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_SeedList_To_v1alpha1_SeedList(a.(*core.SeedList), b.(*SeedList), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*SeedProvider)(nil), (*core.SeedProvider)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_SeedProvider_To_core_SeedProvider(a.(*SeedProvider), b.(*core.SeedProvider), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.SeedProvider)(nil), (*SeedProvider)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_SeedProvider_To_v1alpha1_SeedProvider(a.(*core.SeedProvider), b.(*SeedProvider), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*SeedSelector)(nil), (*core.SeedSelector)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_SeedSelector_To_core_SeedSelector(a.(*SeedSelector), b.(*core.SeedSelector), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.SeedSelector)(nil), (*SeedSelector)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_SeedSelector_To_v1alpha1_SeedSelector(a.(*core.SeedSelector), b.(*SeedSelector), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*SeedSettingExcessCapacityReservation)(nil), (*core.SeedSettingExcessCapacityReservation)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_SeedSettingExcessCapacityReservation_To_core_SeedSettingExcessCapacityReservation(a.(*SeedSettingExcessCapacityReservation), b.(*core.SeedSettingExcessCapacityReservation), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.SeedSettingExcessCapacityReservation)(nil), (*SeedSettingExcessCapacityReservation)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_SeedSettingExcessCapacityReservation_To_v1alpha1_SeedSettingExcessCapacityReservation(a.(*core.SeedSettingExcessCapacityReservation), b.(*SeedSettingExcessCapacityReservation), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*SeedSettingLoadBalancerServices)(nil), (*core.SeedSettingLoadBalancerServices)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_SeedSettingLoadBalancerServices_To_core_SeedSettingLoadBalancerServices(a.(*SeedSettingLoadBalancerServices), b.(*core.SeedSettingLoadBalancerServices), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.SeedSettingLoadBalancerServices)(nil), (*SeedSettingLoadBalancerServices)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_SeedSettingLoadBalancerServices_To_v1alpha1_SeedSettingLoadBalancerServices(a.(*core.SeedSettingLoadBalancerServices), b.(*SeedSettingLoadBalancerServices), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*SeedSettingScheduling)(nil), (*core.SeedSettingScheduling)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_SeedSettingScheduling_To_core_SeedSettingScheduling(a.(*SeedSettingScheduling), b.(*core.SeedSettingScheduling), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.SeedSettingScheduling)(nil), (*SeedSettingScheduling)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_SeedSettingScheduling_To_v1alpha1_SeedSettingScheduling(a.(*core.SeedSettingScheduling), b.(*SeedSettingScheduling), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*SeedSettingShootDNS)(nil), (*core.SeedSettingShootDNS)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_SeedSettingShootDNS_To_core_SeedSettingShootDNS(a.(*SeedSettingShootDNS), b.(*core.SeedSettingShootDNS), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.SeedSettingShootDNS)(nil), (*SeedSettingShootDNS)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_SeedSettingShootDNS_To_v1alpha1_SeedSettingShootDNS(a.(*core.SeedSettingShootDNS), b.(*SeedSettingShootDNS), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*SeedSettingVerticalPodAutoscaler)(nil), (*core.SeedSettingVerticalPodAutoscaler)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_SeedSettingVerticalPodAutoscaler_To_core_SeedSettingVerticalPodAutoscaler(a.(*SeedSettingVerticalPodAutoscaler), b.(*core.SeedSettingVerticalPodAutoscaler), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.SeedSettingVerticalPodAutoscaler)(nil), (*SeedSettingVerticalPodAutoscaler)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_SeedSettingVerticalPodAutoscaler_To_v1alpha1_SeedSettingVerticalPodAutoscaler(a.(*core.SeedSettingVerticalPodAutoscaler), b.(*SeedSettingVerticalPodAutoscaler), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*SeedSettings)(nil), (*core.SeedSettings)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_SeedSettings_To_core_SeedSettings(a.(*SeedSettings), b.(*core.SeedSettings), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.SeedSettings)(nil), (*SeedSettings)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_SeedSettings_To_v1alpha1_SeedSettings(a.(*core.SeedSettings), b.(*SeedSettings), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*SeedStatus)(nil), (*core.SeedStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_SeedStatus_To_core_SeedStatus(a.(*SeedStatus), b.(*core.SeedStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*SeedTaint)(nil), (*core.SeedTaint)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_SeedTaint_To_core_SeedTaint(a.(*SeedTaint), b.(*core.SeedTaint), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.SeedTaint)(nil), (*SeedTaint)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_SeedTaint_To_v1alpha1_SeedTaint(a.(*core.SeedTaint), b.(*SeedTaint), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*SeedVolume)(nil), (*core.SeedVolume)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_SeedVolume_To_core_SeedVolume(a.(*SeedVolume), b.(*core.SeedVolume), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.SeedVolume)(nil), (*SeedVolume)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_SeedVolume_To_v1alpha1_SeedVolume(a.(*core.SeedVolume), b.(*SeedVolume), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*SeedVolumeProvider)(nil), (*core.SeedVolumeProvider)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_SeedVolumeProvider_To_core_SeedVolumeProvider(a.(*SeedVolumeProvider), b.(*core.SeedVolumeProvider), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.SeedVolumeProvider)(nil), (*SeedVolumeProvider)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_SeedVolumeProvider_To_v1alpha1_SeedVolumeProvider(a.(*core.SeedVolumeProvider), b.(*SeedVolumeProvider), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*ServiceAccountConfig)(nil), (*core.ServiceAccountConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_ServiceAccountConfig_To_core_ServiceAccountConfig(a.(*ServiceAccountConfig), b.(*core.ServiceAccountConfig), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.ServiceAccountConfig)(nil), (*ServiceAccountConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_ServiceAccountConfig_To_v1alpha1_ServiceAccountConfig(a.(*core.ServiceAccountConfig), b.(*ServiceAccountConfig), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*Shoot)(nil), (*core.Shoot)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_Shoot_To_core_Shoot(a.(*Shoot), b.(*core.Shoot), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.Shoot)(nil), (*Shoot)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_Shoot_To_v1alpha1_Shoot(a.(*core.Shoot), b.(*Shoot), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*ShootList)(nil), (*core.ShootList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_ShootList_To_core_ShootList(a.(*ShootList), b.(*core.ShootList), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.ShootList)(nil), (*ShootList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_ShootList_To_v1alpha1_ShootList(a.(*core.ShootList), b.(*ShootList), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*ShootMachineImage)(nil), (*core.ShootMachineImage)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_ShootMachineImage_To_core_ShootMachineImage(a.(*ShootMachineImage), b.(*core.ShootMachineImage), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.ShootMachineImage)(nil), (*ShootMachineImage)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_ShootMachineImage_To_v1alpha1_ShootMachineImage(a.(*core.ShootMachineImage), b.(*ShootMachineImage), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*ShootNetworks)(nil), (*core.ShootNetworks)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_ShootNetworks_To_core_ShootNetworks(a.(*ShootNetworks), b.(*core.ShootNetworks), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.ShootNetworks)(nil), (*ShootNetworks)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_ShootNetworks_To_v1alpha1_ShootNetworks(a.(*core.ShootNetworks), b.(*ShootNetworks), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*ShootSpec)(nil), (*core.ShootSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_ShootSpec_To_core_ShootSpec(a.(*ShootSpec), b.(*core.ShootSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.ShootSpec)(nil), (*ShootSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_ShootSpec_To_v1alpha1_ShootSpec(a.(*core.ShootSpec), b.(*ShootSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*ShootState)(nil), (*core.ShootState)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_ShootState_To_core_ShootState(a.(*ShootState), b.(*core.ShootState), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.ShootState)(nil), (*ShootState)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_ShootState_To_v1alpha1_ShootState(a.(*core.ShootState), b.(*ShootState), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*ShootStateList)(nil), (*core.ShootStateList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_ShootStateList_To_core_ShootStateList(a.(*ShootStateList), b.(*core.ShootStateList), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.ShootStateList)(nil), (*ShootStateList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_ShootStateList_To_v1alpha1_ShootStateList(a.(*core.ShootStateList), b.(*ShootStateList), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*ShootStateSpec)(nil), (*core.ShootStateSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_ShootStateSpec_To_core_ShootStateSpec(a.(*ShootStateSpec), b.(*core.ShootStateSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.ShootStateSpec)(nil), (*ShootStateSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_ShootStateSpec_To_v1alpha1_ShootStateSpec(a.(*core.ShootStateSpec), b.(*ShootStateSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*Toleration)(nil), (*core.Toleration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_Toleration_To_core_Toleration(a.(*Toleration), b.(*core.Toleration), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.Toleration)(nil), (*Toleration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_Toleration_To_v1alpha1_Toleration(a.(*core.Toleration), b.(*Toleration), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*VerticalPodAutoscaler)(nil), (*core.VerticalPodAutoscaler)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_VerticalPodAutoscaler_To_core_VerticalPodAutoscaler(a.(*VerticalPodAutoscaler), b.(*core.VerticalPodAutoscaler), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.VerticalPodAutoscaler)(nil), (*VerticalPodAutoscaler)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_VerticalPodAutoscaler_To_v1alpha1_VerticalPodAutoscaler(a.(*core.VerticalPodAutoscaler), b.(*VerticalPodAutoscaler), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*Volume)(nil), (*core.Volume)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_Volume_To_core_Volume(a.(*Volume), b.(*core.Volume), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.Volume)(nil), (*Volume)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_Volume_To_v1alpha1_Volume(a.(*core.Volume), b.(*Volume), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*VolumeType)(nil), (*core.VolumeType)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_VolumeType_To_core_VolumeType(a.(*VolumeType), b.(*core.VolumeType), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.VolumeType)(nil), (*VolumeType)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_VolumeType_To_v1alpha1_VolumeType(a.(*core.VolumeType), b.(*VolumeType), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*WatchCacheSizes)(nil), (*core.WatchCacheSizes)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_WatchCacheSizes_To_core_WatchCacheSizes(a.(*WatchCacheSizes), b.(*core.WatchCacheSizes), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.WatchCacheSizes)(nil), (*WatchCacheSizes)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_WatchCacheSizes_To_v1alpha1_WatchCacheSizes(a.(*core.WatchCacheSizes), b.(*WatchCacheSizes), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*Worker)(nil), (*core.Worker)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_Worker_To_core_Worker(a.(*Worker), b.(*core.Worker), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.Worker)(nil), (*Worker)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_Worker_To_v1alpha1_Worker(a.(*core.Worker), b.(*Worker), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*WorkerKubernetes)(nil), (*core.WorkerKubernetes)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_WorkerKubernetes_To_core_WorkerKubernetes(a.(*WorkerKubernetes), b.(*core.WorkerKubernetes), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.WorkerKubernetes)(nil), (*WorkerKubernetes)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_WorkerKubernetes_To_v1alpha1_WorkerKubernetes(a.(*core.WorkerKubernetes), b.(*WorkerKubernetes), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*WorkerSystemComponents)(nil), (*core.WorkerSystemComponents)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_WorkerSystemComponents_To_core_WorkerSystemComponents(a.(*WorkerSystemComponents), b.(*core.WorkerSystemComponents), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.WorkerSystemComponents)(nil), (*WorkerSystemComponents)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_WorkerSystemComponents_To_v1alpha1_WorkerSystemComponents(a.(*core.WorkerSystemComponents), b.(*WorkerSystemComponents), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*core.BackupBucketSpec)(nil), (*BackupBucketSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_BackupBucketSpec_To_v1alpha1_BackupBucketSpec(a.(*core.BackupBucketSpec), b.(*BackupBucketSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*core.BackupBucket)(nil), (*BackupBucket)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_BackupBucket_To_v1alpha1_BackupBucket(a.(*core.BackupBucket), b.(*BackupBucket), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*core.BackupEntrySpec)(nil), (*BackupEntrySpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_BackupEntrySpec_To_v1alpha1_BackupEntrySpec(a.(*core.BackupEntrySpec), b.(*BackupEntrySpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*core.BackupEntry)(nil), (*BackupEntry)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_BackupEntry_To_v1alpha1_BackupEntry(a.(*core.BackupEntry), b.(*BackupEntry), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*core.ProjectMember)(nil), (*ProjectMember)(nil), func(a,
b interface{}, scope conversion.Scope) error { + return Convert_core_ProjectMember_To_v1alpha1_ProjectMember(a.(*core.ProjectMember), b.(*ProjectMember), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*core.ProjectSpec)(nil), (*ProjectSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ProjectSpec_To_v1alpha1_ProjectSpec(a.(*core.ProjectSpec), b.(*ProjectSpec), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*core.SeedNetworks)(nil), (*SeedNetworks)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_SeedNetworks_To_v1alpha1_SeedNetworks(a.(*core.SeedNetworks), b.(*SeedNetworks), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*core.SeedSpec)(nil), (*SeedSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_SeedSpec_To_v1alpha1_SeedSpec(a.(*core.SeedSpec), b.(*SeedSpec), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*core.SeedStatus)(nil), (*SeedStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_SeedStatus_To_v1alpha1_SeedStatus(a.(*core.SeedStatus), b.(*SeedStatus), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*core.Seed)(nil), (*Seed)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_Seed_To_v1alpha1_Seed(a.(*core.Seed), b.(*Seed), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*core.ShootStatus)(nil), (*ShootStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ShootStatus_To_v1alpha1_ShootStatus(a.(*core.ShootStatus), b.(*ShootStatus), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*BackupBucketSpec)(nil), (*core.BackupBucketSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_BackupBucketSpec_To_core_BackupBucketSpec(a.(*BackupBucketSpec), b.(*core.BackupBucketSpec), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*BackupBucket)(nil), (*core.BackupBucket)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_BackupBucket_To_core_BackupBucket(a.(*BackupBucket), b.(*core.BackupBucket), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*BackupEntrySpec)(nil), (*core.BackupEntrySpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_BackupEntrySpec_To_core_BackupEntrySpec(a.(*BackupEntrySpec), b.(*core.BackupEntrySpec), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*BackupEntry)(nil), (*core.BackupEntry)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_BackupEntry_To_core_BackupEntry(a.(*BackupEntry), b.(*core.BackupEntry), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*ProjectMember)(nil), (*core.ProjectMember)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_ProjectMember_To_core_ProjectMember(a.(*ProjectMember), b.(*core.ProjectMember), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*ProjectSpec)(nil), (*core.ProjectSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_ProjectSpec_To_core_ProjectSpec(a.(*ProjectSpec), b.(*core.ProjectSpec), scope) + }); err != nil { + return err + } + if err := 
s.AddConversionFunc((*SeedNetworks)(nil), (*core.SeedNetworks)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_SeedNetworks_To_core_SeedNetworks(a.(*SeedNetworks), b.(*core.SeedNetworks), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*SeedSpec)(nil), (*core.SeedSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_SeedSpec_To_core_SeedSpec(a.(*SeedSpec), b.(*core.SeedSpec), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*Seed)(nil), (*core.Seed)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_Seed_To_core_Seed(a.(*Seed), b.(*core.Seed), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*ShootStatus)(nil), (*core.ShootStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_ShootStatus_To_core_ShootStatus(a.(*ShootStatus), b.(*core.ShootStatus), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_v1alpha1_Addon_To_core_Addon(in *Addon, out *core.Addon, s conversion.Scope) error { + out.Enabled = in.Enabled + return nil +} + +// Convert_v1alpha1_Addon_To_core_Addon is an autogenerated conversion function. +func Convert_v1alpha1_Addon_To_core_Addon(in *Addon, out *core.Addon, s conversion.Scope) error { + return autoConvert_v1alpha1_Addon_To_core_Addon(in, out, s) +} + +func autoConvert_core_Addon_To_v1alpha1_Addon(in *core.Addon, out *Addon, s conversion.Scope) error { + out.Enabled = in.Enabled + return nil +} + +// Convert_core_Addon_To_v1alpha1_Addon is an autogenerated conversion function. +func Convert_core_Addon_To_v1alpha1_Addon(in *core.Addon, out *Addon, s conversion.Scope) error { + return autoConvert_core_Addon_To_v1alpha1_Addon(in, out, s) +} + +func autoConvert_v1alpha1_Addons_To_core_Addons(in *Addons, out *core.Addons, s conversion.Scope) error { + out.KubernetesDashboard = (*core.KubernetesDashboard)(unsafe.Pointer(in.KubernetesDashboard)) + out.NginxIngress = (*core.NginxIngress)(unsafe.Pointer(in.NginxIngress)) + return nil +} + +// Convert_v1alpha1_Addons_To_core_Addons is an autogenerated conversion function. +func Convert_v1alpha1_Addons_To_core_Addons(in *Addons, out *core.Addons, s conversion.Scope) error { + return autoConvert_v1alpha1_Addons_To_core_Addons(in, out, s) +} + +func autoConvert_core_Addons_To_v1alpha1_Addons(in *core.Addons, out *Addons, s conversion.Scope) error { + out.KubernetesDashboard = (*KubernetesDashboard)(unsafe.Pointer(in.KubernetesDashboard)) + out.NginxIngress = (*NginxIngress)(unsafe.Pointer(in.NginxIngress)) + return nil +} + +// Convert_core_Addons_To_v1alpha1_Addons is an autogenerated conversion function. +func Convert_core_Addons_To_v1alpha1_Addons(in *core.Addons, out *Addons, s conversion.Scope) error { + return autoConvert_core_Addons_To_v1alpha1_Addons(in, out, s) +} + +func autoConvert_v1alpha1_AdmissionPlugin_To_core_AdmissionPlugin(in *AdmissionPlugin, out *core.AdmissionPlugin, s conversion.Scope) error { + out.Name = in.Name + out.Config = (*runtime.RawExtension)(unsafe.Pointer(in.Config)) + return nil +} + +// Convert_v1alpha1_AdmissionPlugin_To_core_AdmissionPlugin is an autogenerated conversion function. 
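Illustrative sketch, not part of the vendored file or of this patch: the registration calls above wire every generated Convert_* function into a runtime.Scheme, so callers convert between the external v1alpha1 types and the internal core types without naming those functions directly. A minimal usage sketch, assuming the conventional AddToScheme helpers of pkg/apis/core and of this package, and assuming the package's scheme builder runs the generated registration function:

package main

import (
	"fmt"

	gardencore "github.com/gardener/gardener/pkg/apis/core"
	gardencorev1alpha1 "github.com/gardener/gardener/pkg/apis/core/v1alpha1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()
	// Assumption: AddToScheme of each package registers its types and, for the
	// versioned package, the generated conversion functions shown above.
	if err := gardencore.AddToScheme(scheme); err != nil {
		panic(err)
	}
	if err := gardencorev1alpha1.AddToScheme(scheme); err != nil {
		panic(err)
	}

	external := &gardencorev1alpha1.Shoot{}
	external.Name = "my-shoot"

	// scheme.Convert looks up the registered pair, here
	// Convert_v1alpha1_Shoot_To_core_Shoot, and applies it.
	internal := &gardencore.Shoot{}
	if err := scheme.Convert(external, internal, nil); err != nil {
		panic(err)
	}
	fmt.Println(internal.Name)
}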
+func Convert_v1alpha1_AdmissionPlugin_To_core_AdmissionPlugin(in *AdmissionPlugin, out *core.AdmissionPlugin, s conversion.Scope) error { + return autoConvert_v1alpha1_AdmissionPlugin_To_core_AdmissionPlugin(in, out, s) +} + +func autoConvert_core_AdmissionPlugin_To_v1alpha1_AdmissionPlugin(in *core.AdmissionPlugin, out *AdmissionPlugin, s conversion.Scope) error { + out.Name = in.Name + out.Config = (*runtime.RawExtension)(unsafe.Pointer(in.Config)) + return nil +} + +// Convert_core_AdmissionPlugin_To_v1alpha1_AdmissionPlugin is an autogenerated conversion function. +func Convert_core_AdmissionPlugin_To_v1alpha1_AdmissionPlugin(in *core.AdmissionPlugin, out *AdmissionPlugin, s conversion.Scope) error { + return autoConvert_core_AdmissionPlugin_To_v1alpha1_AdmissionPlugin(in, out, s) +} + +func autoConvert_v1alpha1_Alerting_To_core_Alerting(in *Alerting, out *core.Alerting, s conversion.Scope) error { + out.EmailReceivers = *(*[]string)(unsafe.Pointer(&in.EmailReceivers)) + return nil +} + +// Convert_v1alpha1_Alerting_To_core_Alerting is an autogenerated conversion function. +func Convert_v1alpha1_Alerting_To_core_Alerting(in *Alerting, out *core.Alerting, s conversion.Scope) error { + return autoConvert_v1alpha1_Alerting_To_core_Alerting(in, out, s) +} + +func autoConvert_core_Alerting_To_v1alpha1_Alerting(in *core.Alerting, out *Alerting, s conversion.Scope) error { + out.EmailReceivers = *(*[]string)(unsafe.Pointer(&in.EmailReceivers)) + return nil +} + +// Convert_core_Alerting_To_v1alpha1_Alerting is an autogenerated conversion function. +func Convert_core_Alerting_To_v1alpha1_Alerting(in *core.Alerting, out *Alerting, s conversion.Scope) error { + return autoConvert_core_Alerting_To_v1alpha1_Alerting(in, out, s) +} + +func autoConvert_v1alpha1_AuditConfig_To_core_AuditConfig(in *AuditConfig, out *core.AuditConfig, s conversion.Scope) error { + out.AuditPolicy = (*core.AuditPolicy)(unsafe.Pointer(in.AuditPolicy)) + return nil +} + +// Convert_v1alpha1_AuditConfig_To_core_AuditConfig is an autogenerated conversion function. +func Convert_v1alpha1_AuditConfig_To_core_AuditConfig(in *AuditConfig, out *core.AuditConfig, s conversion.Scope) error { + return autoConvert_v1alpha1_AuditConfig_To_core_AuditConfig(in, out, s) +} + +func autoConvert_core_AuditConfig_To_v1alpha1_AuditConfig(in *core.AuditConfig, out *AuditConfig, s conversion.Scope) error { + out.AuditPolicy = (*AuditPolicy)(unsafe.Pointer(in.AuditPolicy)) + return nil +} + +// Convert_core_AuditConfig_To_v1alpha1_AuditConfig is an autogenerated conversion function. +func Convert_core_AuditConfig_To_v1alpha1_AuditConfig(in *core.AuditConfig, out *AuditConfig, s conversion.Scope) error { + return autoConvert_core_AuditConfig_To_v1alpha1_AuditConfig(in, out, s) +} + +func autoConvert_v1alpha1_AuditPolicy_To_core_AuditPolicy(in *AuditPolicy, out *core.AuditPolicy, s conversion.Scope) error { + out.ConfigMapRef = (*v1.ObjectReference)(unsafe.Pointer(in.ConfigMapRef)) + return nil +} + +// Convert_v1alpha1_AuditPolicy_To_core_AuditPolicy is an autogenerated conversion function. 
+func Convert_v1alpha1_AuditPolicy_To_core_AuditPolicy(in *AuditPolicy, out *core.AuditPolicy, s conversion.Scope) error {
+	return autoConvert_v1alpha1_AuditPolicy_To_core_AuditPolicy(in, out, s)
+}
+
+func autoConvert_core_AuditPolicy_To_v1alpha1_AuditPolicy(in *core.AuditPolicy, out *AuditPolicy, s conversion.Scope) error {
+	out.ConfigMapRef = (*v1.ObjectReference)(unsafe.Pointer(in.ConfigMapRef))
+	return nil
+}
+
+// Convert_core_AuditPolicy_To_v1alpha1_AuditPolicy is an autogenerated conversion function.
+func Convert_core_AuditPolicy_To_v1alpha1_AuditPolicy(in *core.AuditPolicy, out *AuditPolicy, s conversion.Scope) error {
+	return autoConvert_core_AuditPolicy_To_v1alpha1_AuditPolicy(in, out, s)
+}
+
+func autoConvert_v1alpha1_AvailabilityZone_To_core_AvailabilityZone(in *AvailabilityZone, out *core.AvailabilityZone, s conversion.Scope) error {
+	out.Name = in.Name
+	out.UnavailableMachineTypes = *(*[]string)(unsafe.Pointer(&in.UnavailableMachineTypes))
+	out.UnavailableVolumeTypes = *(*[]string)(unsafe.Pointer(&in.UnavailableVolumeTypes))
+	return nil
+}
+
+// Convert_v1alpha1_AvailabilityZone_To_core_AvailabilityZone is an autogenerated conversion function.
+func Convert_v1alpha1_AvailabilityZone_To_core_AvailabilityZone(in *AvailabilityZone, out *core.AvailabilityZone, s conversion.Scope) error {
+	return autoConvert_v1alpha1_AvailabilityZone_To_core_AvailabilityZone(in, out, s)
+}
+
+func autoConvert_core_AvailabilityZone_To_v1alpha1_AvailabilityZone(in *core.AvailabilityZone, out *AvailabilityZone, s conversion.Scope) error {
+	out.Name = in.Name
+	out.UnavailableMachineTypes = *(*[]string)(unsafe.Pointer(&in.UnavailableMachineTypes))
+	out.UnavailableVolumeTypes = *(*[]string)(unsafe.Pointer(&in.UnavailableVolumeTypes))
+	return nil
+}
+
+// Convert_core_AvailabilityZone_To_v1alpha1_AvailabilityZone is an autogenerated conversion function.
+func Convert_core_AvailabilityZone_To_v1alpha1_AvailabilityZone(in *core.AvailabilityZone, out *AvailabilityZone, s conversion.Scope) error { + return autoConvert_core_AvailabilityZone_To_v1alpha1_AvailabilityZone(in, out, s) +} + +func autoConvert_v1alpha1_BackupBucket_To_core_BackupBucket(in *BackupBucket, out *core.BackupBucket, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha1_BackupBucketSpec_To_core_BackupBucketSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1alpha1_BackupBucketStatus_To_core_BackupBucketStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func autoConvert_core_BackupBucket_To_v1alpha1_BackupBucket(in *core.BackupBucket, out *BackupBucket, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_BackupBucketSpec_To_v1alpha1_BackupBucketSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_core_BackupBucketStatus_To_v1alpha1_BackupBucketStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func autoConvert_v1alpha1_BackupBucketList_To_core_BackupBucketList(in *BackupBucketList, out *core.BackupBucketList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]core.BackupBucket, len(*in)) + for i := range *in { + if err := Convert_v1alpha1_BackupBucket_To_core_BackupBucket(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_v1alpha1_BackupBucketList_To_core_BackupBucketList is an autogenerated conversion function. +func Convert_v1alpha1_BackupBucketList_To_core_BackupBucketList(in *BackupBucketList, out *core.BackupBucketList, s conversion.Scope) error { + return autoConvert_v1alpha1_BackupBucketList_To_core_BackupBucketList(in, out, s) +} + +func autoConvert_core_BackupBucketList_To_v1alpha1_BackupBucketList(in *core.BackupBucketList, out *BackupBucketList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]BackupBucket, len(*in)) + for i := range *in { + if err := Convert_core_BackupBucket_To_v1alpha1_BackupBucket(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_core_BackupBucketList_To_v1alpha1_BackupBucketList is an autogenerated conversion function. +func Convert_core_BackupBucketList_To_v1alpha1_BackupBucketList(in *core.BackupBucketList, out *BackupBucketList, s conversion.Scope) error { + return autoConvert_core_BackupBucketList_To_v1alpha1_BackupBucketList(in, out, s) +} + +func autoConvert_v1alpha1_BackupBucketProvider_To_core_BackupBucketProvider(in *BackupBucketProvider, out *core.BackupBucketProvider, s conversion.Scope) error { + out.Type = in.Type + out.Region = in.Region + return nil +} + +// Convert_v1alpha1_BackupBucketProvider_To_core_BackupBucketProvider is an autogenerated conversion function. 
+func Convert_v1alpha1_BackupBucketProvider_To_core_BackupBucketProvider(in *BackupBucketProvider, out *core.BackupBucketProvider, s conversion.Scope) error { + return autoConvert_v1alpha1_BackupBucketProvider_To_core_BackupBucketProvider(in, out, s) +} + +func autoConvert_core_BackupBucketProvider_To_v1alpha1_BackupBucketProvider(in *core.BackupBucketProvider, out *BackupBucketProvider, s conversion.Scope) error { + out.Type = in.Type + out.Region = in.Region + return nil +} + +// Convert_core_BackupBucketProvider_To_v1alpha1_BackupBucketProvider is an autogenerated conversion function. +func Convert_core_BackupBucketProvider_To_v1alpha1_BackupBucketProvider(in *core.BackupBucketProvider, out *BackupBucketProvider, s conversion.Scope) error { + return autoConvert_core_BackupBucketProvider_To_v1alpha1_BackupBucketProvider(in, out, s) +} + +func autoConvert_v1alpha1_BackupBucketSpec_To_core_BackupBucketSpec(in *BackupBucketSpec, out *core.BackupBucketSpec, s conversion.Scope) error { + if err := Convert_v1alpha1_BackupBucketProvider_To_core_BackupBucketProvider(&in.Provider, &out.Provider, s); err != nil { + return err + } + out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig)) + out.SecretRef = in.SecretRef + // WARNING: in.Seed requires manual conversion: does not exist in peer-type + return nil +} + +func autoConvert_core_BackupBucketSpec_To_v1alpha1_BackupBucketSpec(in *core.BackupBucketSpec, out *BackupBucketSpec, s conversion.Scope) error { + if err := Convert_core_BackupBucketProvider_To_v1alpha1_BackupBucketProvider(&in.Provider, &out.Provider, s); err != nil { + return err + } + out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig)) + out.SecretRef = in.SecretRef + // WARNING: in.SeedName requires manual conversion: does not exist in peer-type + return nil +} + +func autoConvert_v1alpha1_BackupBucketStatus_To_core_BackupBucketStatus(in *BackupBucketStatus, out *core.BackupBucketStatus, s conversion.Scope) error { + out.ProviderStatus = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderStatus)) + out.LastOperation = (*core.LastOperation)(unsafe.Pointer(in.LastOperation)) + out.LastError = (*core.LastError)(unsafe.Pointer(in.LastError)) + out.ObservedGeneration = in.ObservedGeneration + out.GeneratedSecretRef = (*v1.SecretReference)(unsafe.Pointer(in.GeneratedSecretRef)) + return nil +} + +// Convert_v1alpha1_BackupBucketStatus_To_core_BackupBucketStatus is an autogenerated conversion function. +func Convert_v1alpha1_BackupBucketStatus_To_core_BackupBucketStatus(in *BackupBucketStatus, out *core.BackupBucketStatus, s conversion.Scope) error { + return autoConvert_v1alpha1_BackupBucketStatus_To_core_BackupBucketStatus(in, out, s) +} + +func autoConvert_core_BackupBucketStatus_To_v1alpha1_BackupBucketStatus(in *core.BackupBucketStatus, out *BackupBucketStatus, s conversion.Scope) error { + out.ProviderStatus = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderStatus)) + out.LastOperation = (*LastOperation)(unsafe.Pointer(in.LastOperation)) + out.LastError = (*LastError)(unsafe.Pointer(in.LastError)) + out.ObservedGeneration = in.ObservedGeneration + out.GeneratedSecretRef = (*v1.SecretReference)(unsafe.Pointer(in.GeneratedSecretRef)) + return nil +} + +// Convert_core_BackupBucketStatus_To_v1alpha1_BackupBucketStatus is an autogenerated conversion function. 
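Illustrative sketch, not part of the vendored file or of this patch: the "WARNING: ... requires manual conversion: does not exist in peer-type" comments above mark fields the generator could not match by name, here the v1alpha1 Seed field against the internal SeedName. Such pairs are handled by hand-written conversions, which is why they were registered further up with AddConversionFunc instead of AddGeneratedConversionFunc. A sketch of the shape such a wrapper takes if it lived in this package, assuming both fields are *string; the _sketch suffix marks it as hypothetical rather than the project's actual conversions.go:

// Hypothetical: delegate the 1:1 fields to the generated helper, then map the
// renamed field by hand.
func Convert_v1alpha1_BackupBucketSpec_To_core_BackupBucketSpec_sketch(in *BackupBucketSpec, out *core.BackupBucketSpec, s conversion.Scope) error {
	if err := autoConvert_v1alpha1_BackupBucketSpec_To_core_BackupBucketSpec(in, out, s); err != nil {
		return err
	}
	// Assumption: Seed (v1alpha1) and SeedName (core) are both *string.
	out.SeedName = in.Seed
	return nil
}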
+func Convert_core_BackupBucketStatus_To_v1alpha1_BackupBucketStatus(in *core.BackupBucketStatus, out *BackupBucketStatus, s conversion.Scope) error { + return autoConvert_core_BackupBucketStatus_To_v1alpha1_BackupBucketStatus(in, out, s) +} + +func autoConvert_v1alpha1_BackupEntry_To_core_BackupEntry(in *BackupEntry, out *core.BackupEntry, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha1_BackupEntrySpec_To_core_BackupEntrySpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1alpha1_BackupEntryStatus_To_core_BackupEntryStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func autoConvert_core_BackupEntry_To_v1alpha1_BackupEntry(in *core.BackupEntry, out *BackupEntry, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_BackupEntrySpec_To_v1alpha1_BackupEntrySpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_core_BackupEntryStatus_To_v1alpha1_BackupEntryStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func autoConvert_v1alpha1_BackupEntryList_To_core_BackupEntryList(in *BackupEntryList, out *core.BackupEntryList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]core.BackupEntry, len(*in)) + for i := range *in { + if err := Convert_v1alpha1_BackupEntry_To_core_BackupEntry(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_v1alpha1_BackupEntryList_To_core_BackupEntryList is an autogenerated conversion function. +func Convert_v1alpha1_BackupEntryList_To_core_BackupEntryList(in *BackupEntryList, out *core.BackupEntryList, s conversion.Scope) error { + return autoConvert_v1alpha1_BackupEntryList_To_core_BackupEntryList(in, out, s) +} + +func autoConvert_core_BackupEntryList_To_v1alpha1_BackupEntryList(in *core.BackupEntryList, out *BackupEntryList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]BackupEntry, len(*in)) + for i := range *in { + if err := Convert_core_BackupEntry_To_v1alpha1_BackupEntry(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_core_BackupEntryList_To_v1alpha1_BackupEntryList is an autogenerated conversion function. 
+func Convert_core_BackupEntryList_To_v1alpha1_BackupEntryList(in *core.BackupEntryList, out *BackupEntryList, s conversion.Scope) error { + return autoConvert_core_BackupEntryList_To_v1alpha1_BackupEntryList(in, out, s) +} + +func autoConvert_v1alpha1_BackupEntrySpec_To_core_BackupEntrySpec(in *BackupEntrySpec, out *core.BackupEntrySpec, s conversion.Scope) error { + out.BucketName = in.BucketName + // WARNING: in.Seed requires manual conversion: does not exist in peer-type + return nil +} + +func autoConvert_core_BackupEntrySpec_To_v1alpha1_BackupEntrySpec(in *core.BackupEntrySpec, out *BackupEntrySpec, s conversion.Scope) error { + out.BucketName = in.BucketName + // WARNING: in.SeedName requires manual conversion: does not exist in peer-type + return nil +} + +func autoConvert_v1alpha1_BackupEntryStatus_To_core_BackupEntryStatus(in *BackupEntryStatus, out *core.BackupEntryStatus, s conversion.Scope) error { + out.LastOperation = (*core.LastOperation)(unsafe.Pointer(in.LastOperation)) + out.LastError = (*core.LastError)(unsafe.Pointer(in.LastError)) + out.ObservedGeneration = in.ObservedGeneration + return nil +} + +// Convert_v1alpha1_BackupEntryStatus_To_core_BackupEntryStatus is an autogenerated conversion function. +func Convert_v1alpha1_BackupEntryStatus_To_core_BackupEntryStatus(in *BackupEntryStatus, out *core.BackupEntryStatus, s conversion.Scope) error { + return autoConvert_v1alpha1_BackupEntryStatus_To_core_BackupEntryStatus(in, out, s) +} + +func autoConvert_core_BackupEntryStatus_To_v1alpha1_BackupEntryStatus(in *core.BackupEntryStatus, out *BackupEntryStatus, s conversion.Scope) error { + out.LastOperation = (*LastOperation)(unsafe.Pointer(in.LastOperation)) + out.LastError = (*LastError)(unsafe.Pointer(in.LastError)) + out.ObservedGeneration = in.ObservedGeneration + return nil +} + +// Convert_core_BackupEntryStatus_To_v1alpha1_BackupEntryStatus is an autogenerated conversion function. +func Convert_core_BackupEntryStatus_To_v1alpha1_BackupEntryStatus(in *core.BackupEntryStatus, out *BackupEntryStatus, s conversion.Scope) error { + return autoConvert_core_BackupEntryStatus_To_v1alpha1_BackupEntryStatus(in, out, s) +} + +func autoConvert_v1alpha1_CRI_To_core_CRI(in *CRI, out *core.CRI, s conversion.Scope) error { + out.Name = core.CRIName(in.Name) + out.ContainerRuntimes = *(*[]core.ContainerRuntime)(unsafe.Pointer(&in.ContainerRuntimes)) + return nil +} + +// Convert_v1alpha1_CRI_To_core_CRI is an autogenerated conversion function. +func Convert_v1alpha1_CRI_To_core_CRI(in *CRI, out *core.CRI, s conversion.Scope) error { + return autoConvert_v1alpha1_CRI_To_core_CRI(in, out, s) +} + +func autoConvert_core_CRI_To_v1alpha1_CRI(in *core.CRI, out *CRI, s conversion.Scope) error { + out.Name = CRIName(in.Name) + out.ContainerRuntimes = *(*[]ContainerRuntime)(unsafe.Pointer(&in.ContainerRuntimes)) + return nil +} + +// Convert_core_CRI_To_v1alpha1_CRI is an autogenerated conversion function. +func Convert_core_CRI_To_v1alpha1_CRI(in *core.CRI, out *CRI, s conversion.Scope) error { + return autoConvert_core_CRI_To_v1alpha1_CRI(in, out, s) +} + +func autoConvert_v1alpha1_CloudInfo_To_core_CloudInfo(in *CloudInfo, out *core.CloudInfo, s conversion.Scope) error { + out.Type = in.Type + out.Region = in.Region + return nil +} + +// Convert_v1alpha1_CloudInfo_To_core_CloudInfo is an autogenerated conversion function. 
+func Convert_v1alpha1_CloudInfo_To_core_CloudInfo(in *CloudInfo, out *core.CloudInfo, s conversion.Scope) error { + return autoConvert_v1alpha1_CloudInfo_To_core_CloudInfo(in, out, s) +} + +func autoConvert_core_CloudInfo_To_v1alpha1_CloudInfo(in *core.CloudInfo, out *CloudInfo, s conversion.Scope) error { + out.Type = in.Type + out.Region = in.Region + return nil +} + +// Convert_core_CloudInfo_To_v1alpha1_CloudInfo is an autogenerated conversion function. +func Convert_core_CloudInfo_To_v1alpha1_CloudInfo(in *core.CloudInfo, out *CloudInfo, s conversion.Scope) error { + return autoConvert_core_CloudInfo_To_v1alpha1_CloudInfo(in, out, s) +} + +func autoConvert_v1alpha1_CloudProfile_To_core_CloudProfile(in *CloudProfile, out *core.CloudProfile, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha1_CloudProfileSpec_To_core_CloudProfileSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha1_CloudProfile_To_core_CloudProfile is an autogenerated conversion function. +func Convert_v1alpha1_CloudProfile_To_core_CloudProfile(in *CloudProfile, out *core.CloudProfile, s conversion.Scope) error { + return autoConvert_v1alpha1_CloudProfile_To_core_CloudProfile(in, out, s) +} + +func autoConvert_core_CloudProfile_To_v1alpha1_CloudProfile(in *core.CloudProfile, out *CloudProfile, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_CloudProfileSpec_To_v1alpha1_CloudProfileSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +// Convert_core_CloudProfile_To_v1alpha1_CloudProfile is an autogenerated conversion function. +func Convert_core_CloudProfile_To_v1alpha1_CloudProfile(in *core.CloudProfile, out *CloudProfile, s conversion.Scope) error { + return autoConvert_core_CloudProfile_To_v1alpha1_CloudProfile(in, out, s) +} + +func autoConvert_v1alpha1_CloudProfileList_To_core_CloudProfileList(in *CloudProfileList, out *core.CloudProfileList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]core.CloudProfile)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha1_CloudProfileList_To_core_CloudProfileList is an autogenerated conversion function. +func Convert_v1alpha1_CloudProfileList_To_core_CloudProfileList(in *CloudProfileList, out *core.CloudProfileList, s conversion.Scope) error { + return autoConvert_v1alpha1_CloudProfileList_To_core_CloudProfileList(in, out, s) +} + +func autoConvert_core_CloudProfileList_To_v1alpha1_CloudProfileList(in *core.CloudProfileList, out *CloudProfileList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]CloudProfile)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_core_CloudProfileList_To_v1alpha1_CloudProfileList is an autogenerated conversion function. 
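Illustrative sketch, not part of the vendored file or of this patch: two list-conversion shapes appear in this file. When the element types need field-level work (for example BackupBucketList above, whose spec has the Seed/SeedName mismatch), the generator emits a per-element loop; when source and target structs are laid out identically in memory (for example CloudProfileList here), it collapses the copy into a single unsafe.Pointer cast of the Items slice. A self-contained sketch, with hypothetical types, of why that cast is only sound for identical layouts:

package main

import (
	"fmt"
	"unsafe"
)

// Hypothetical peer types: identical field types in identical order, so a slice
// of one can be reinterpreted as a slice of the other.
type externalItem struct {
	Name   string
	Region string
}

type internalItem struct {
	Name   string
	Region string
}

func main() {
	in := []externalItem{{Name: "a", Region: "eu"}, {Name: "b", Region: "us"}}

	// Same pattern as the generated code: reinterpret the backing array instead
	// of copying element by element. Valid only because the layouts match exactly.
	out := *(*[]internalItem)(unsafe.Pointer(&in))

	fmt.Println(len(out), out[0].Name, out[1].Region)
}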
+func Convert_core_CloudProfileList_To_v1alpha1_CloudProfileList(in *core.CloudProfileList, out *CloudProfileList, s conversion.Scope) error { + return autoConvert_core_CloudProfileList_To_v1alpha1_CloudProfileList(in, out, s) +} + +func autoConvert_v1alpha1_CloudProfileSpec_To_core_CloudProfileSpec(in *CloudProfileSpec, out *core.CloudProfileSpec, s conversion.Scope) error { + out.CABundle = (*string)(unsafe.Pointer(in.CABundle)) + if err := Convert_v1alpha1_KubernetesSettings_To_core_KubernetesSettings(&in.Kubernetes, &out.Kubernetes, s); err != nil { + return err + } + out.MachineImages = *(*[]core.MachineImage)(unsafe.Pointer(&in.MachineImages)) + out.MachineTypes = *(*[]core.MachineType)(unsafe.Pointer(&in.MachineTypes)) + out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig)) + out.Regions = *(*[]core.Region)(unsafe.Pointer(&in.Regions)) + out.SeedSelector = (*core.SeedSelector)(unsafe.Pointer(in.SeedSelector)) + out.Type = in.Type + out.VolumeTypes = *(*[]core.VolumeType)(unsafe.Pointer(&in.VolumeTypes)) + return nil +} + +// Convert_v1alpha1_CloudProfileSpec_To_core_CloudProfileSpec is an autogenerated conversion function. +func Convert_v1alpha1_CloudProfileSpec_To_core_CloudProfileSpec(in *CloudProfileSpec, out *core.CloudProfileSpec, s conversion.Scope) error { + return autoConvert_v1alpha1_CloudProfileSpec_To_core_CloudProfileSpec(in, out, s) +} + +func autoConvert_core_CloudProfileSpec_To_v1alpha1_CloudProfileSpec(in *core.CloudProfileSpec, out *CloudProfileSpec, s conversion.Scope) error { + out.CABundle = (*string)(unsafe.Pointer(in.CABundle)) + if err := Convert_core_KubernetesSettings_To_v1alpha1_KubernetesSettings(&in.Kubernetes, &out.Kubernetes, s); err != nil { + return err + } + out.MachineImages = *(*[]MachineImage)(unsafe.Pointer(&in.MachineImages)) + out.MachineTypes = *(*[]MachineType)(unsafe.Pointer(&in.MachineTypes)) + out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig)) + out.Regions = *(*[]Region)(unsafe.Pointer(&in.Regions)) + out.SeedSelector = (*SeedSelector)(unsafe.Pointer(in.SeedSelector)) + out.Type = in.Type + out.VolumeTypes = *(*[]VolumeType)(unsafe.Pointer(&in.VolumeTypes)) + return nil +} + +// Convert_core_CloudProfileSpec_To_v1alpha1_CloudProfileSpec is an autogenerated conversion function. +func Convert_core_CloudProfileSpec_To_v1alpha1_CloudProfileSpec(in *core.CloudProfileSpec, out *CloudProfileSpec, s conversion.Scope) error { + return autoConvert_core_CloudProfileSpec_To_v1alpha1_CloudProfileSpec(in, out, s) +} + +func autoConvert_v1alpha1_ClusterAutoscaler_To_core_ClusterAutoscaler(in *ClusterAutoscaler, out *core.ClusterAutoscaler, s conversion.Scope) error { + out.ScaleDownDelayAfterAdd = (*metav1.Duration)(unsafe.Pointer(in.ScaleDownDelayAfterAdd)) + out.ScaleDownDelayAfterDelete = (*metav1.Duration)(unsafe.Pointer(in.ScaleDownDelayAfterDelete)) + out.ScaleDownDelayAfterFailure = (*metav1.Duration)(unsafe.Pointer(in.ScaleDownDelayAfterFailure)) + out.ScaleDownUnneededTime = (*metav1.Duration)(unsafe.Pointer(in.ScaleDownUnneededTime)) + out.ScaleDownUtilizationThreshold = (*float64)(unsafe.Pointer(in.ScaleDownUtilizationThreshold)) + out.ScanInterval = (*metav1.Duration)(unsafe.Pointer(in.ScanInterval)) + return nil +} + +// Convert_v1alpha1_ClusterAutoscaler_To_core_ClusterAutoscaler is an autogenerated conversion function. 
+func Convert_v1alpha1_ClusterAutoscaler_To_core_ClusterAutoscaler(in *ClusterAutoscaler, out *core.ClusterAutoscaler, s conversion.Scope) error { + return autoConvert_v1alpha1_ClusterAutoscaler_To_core_ClusterAutoscaler(in, out, s) +} + +func autoConvert_core_ClusterAutoscaler_To_v1alpha1_ClusterAutoscaler(in *core.ClusterAutoscaler, out *ClusterAutoscaler, s conversion.Scope) error { + out.ScaleDownDelayAfterAdd = (*metav1.Duration)(unsafe.Pointer(in.ScaleDownDelayAfterAdd)) + out.ScaleDownDelayAfterDelete = (*metav1.Duration)(unsafe.Pointer(in.ScaleDownDelayAfterDelete)) + out.ScaleDownDelayAfterFailure = (*metav1.Duration)(unsafe.Pointer(in.ScaleDownDelayAfterFailure)) + out.ScaleDownUnneededTime = (*metav1.Duration)(unsafe.Pointer(in.ScaleDownUnneededTime)) + out.ScaleDownUtilizationThreshold = (*float64)(unsafe.Pointer(in.ScaleDownUtilizationThreshold)) + out.ScanInterval = (*metav1.Duration)(unsafe.Pointer(in.ScanInterval)) + return nil +} + +// Convert_core_ClusterAutoscaler_To_v1alpha1_ClusterAutoscaler is an autogenerated conversion function. +func Convert_core_ClusterAutoscaler_To_v1alpha1_ClusterAutoscaler(in *core.ClusterAutoscaler, out *ClusterAutoscaler, s conversion.Scope) error { + return autoConvert_core_ClusterAutoscaler_To_v1alpha1_ClusterAutoscaler(in, out, s) +} + +func autoConvert_v1alpha1_ClusterInfo_To_core_ClusterInfo(in *ClusterInfo, out *core.ClusterInfo, s conversion.Scope) error { + if err := Convert_v1alpha1_CloudInfo_To_core_CloudInfo(&in.Cloud, &out.Cloud, s); err != nil { + return err + } + if err := Convert_v1alpha1_KubernetesInfo_To_core_KubernetesInfo(&in.Kubernetes, &out.Kubernetes, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha1_ClusterInfo_To_core_ClusterInfo is an autogenerated conversion function. +func Convert_v1alpha1_ClusterInfo_To_core_ClusterInfo(in *ClusterInfo, out *core.ClusterInfo, s conversion.Scope) error { + return autoConvert_v1alpha1_ClusterInfo_To_core_ClusterInfo(in, out, s) +} + +func autoConvert_core_ClusterInfo_To_v1alpha1_ClusterInfo(in *core.ClusterInfo, out *ClusterInfo, s conversion.Scope) error { + if err := Convert_core_CloudInfo_To_v1alpha1_CloudInfo(&in.Cloud, &out.Cloud, s); err != nil { + return err + } + if err := Convert_core_KubernetesInfo_To_v1alpha1_KubernetesInfo(&in.Kubernetes, &out.Kubernetes, s); err != nil { + return err + } + return nil +} + +// Convert_core_ClusterInfo_To_v1alpha1_ClusterInfo is an autogenerated conversion function. +func Convert_core_ClusterInfo_To_v1alpha1_ClusterInfo(in *core.ClusterInfo, out *ClusterInfo, s conversion.Scope) error { + return autoConvert_core_ClusterInfo_To_v1alpha1_ClusterInfo(in, out, s) +} + +func autoConvert_v1alpha1_Condition_To_core_Condition(in *Condition, out *core.Condition, s conversion.Scope) error { + out.Type = core.ConditionType(in.Type) + out.Status = core.ConditionStatus(in.Status) + out.LastTransitionTime = in.LastTransitionTime + out.LastUpdateTime = in.LastUpdateTime + out.Reason = in.Reason + out.Message = in.Message + out.Codes = *(*[]core.ErrorCode)(unsafe.Pointer(&in.Codes)) + return nil +} + +// Convert_v1alpha1_Condition_To_core_Condition is an autogenerated conversion function. 
+func Convert_v1alpha1_Condition_To_core_Condition(in *Condition, out *core.Condition, s conversion.Scope) error { + return autoConvert_v1alpha1_Condition_To_core_Condition(in, out, s) +} + +func autoConvert_core_Condition_To_v1alpha1_Condition(in *core.Condition, out *Condition, s conversion.Scope) error { + out.Type = ConditionType(in.Type) + out.Status = ConditionStatus(in.Status) + out.LastTransitionTime = in.LastTransitionTime + out.LastUpdateTime = in.LastUpdateTime + out.Reason = in.Reason + out.Message = in.Message + out.Codes = *(*[]ErrorCode)(unsafe.Pointer(&in.Codes)) + return nil +} + +// Convert_core_Condition_To_v1alpha1_Condition is an autogenerated conversion function. +func Convert_core_Condition_To_v1alpha1_Condition(in *core.Condition, out *Condition, s conversion.Scope) error { + return autoConvert_core_Condition_To_v1alpha1_Condition(in, out, s) +} + +func autoConvert_v1alpha1_ContainerRuntime_To_core_ContainerRuntime(in *ContainerRuntime, out *core.ContainerRuntime, s conversion.Scope) error { + out.Type = in.Type + out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig)) + return nil +} + +// Convert_v1alpha1_ContainerRuntime_To_core_ContainerRuntime is an autogenerated conversion function. +func Convert_v1alpha1_ContainerRuntime_To_core_ContainerRuntime(in *ContainerRuntime, out *core.ContainerRuntime, s conversion.Scope) error { + return autoConvert_v1alpha1_ContainerRuntime_To_core_ContainerRuntime(in, out, s) +} + +func autoConvert_core_ContainerRuntime_To_v1alpha1_ContainerRuntime(in *core.ContainerRuntime, out *ContainerRuntime, s conversion.Scope) error { + out.Type = in.Type + out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig)) + return nil +} + +// Convert_core_ContainerRuntime_To_v1alpha1_ContainerRuntime is an autogenerated conversion function. +func Convert_core_ContainerRuntime_To_v1alpha1_ContainerRuntime(in *core.ContainerRuntime, out *ContainerRuntime, s conversion.Scope) error { + return autoConvert_core_ContainerRuntime_To_v1alpha1_ContainerRuntime(in, out, s) +} + +func autoConvert_v1alpha1_ControllerDeployment_To_core_ControllerDeployment(in *ControllerDeployment, out *core.ControllerDeployment, s conversion.Scope) error { + out.Type = in.Type + out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig)) + out.Policy = (*core.ControllerDeploymentPolicy)(unsafe.Pointer(in.Policy)) + out.SeedSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.SeedSelector)) + return nil +} + +// Convert_v1alpha1_ControllerDeployment_To_core_ControllerDeployment is an autogenerated conversion function. +func Convert_v1alpha1_ControllerDeployment_To_core_ControllerDeployment(in *ControllerDeployment, out *core.ControllerDeployment, s conversion.Scope) error { + return autoConvert_v1alpha1_ControllerDeployment_To_core_ControllerDeployment(in, out, s) +} + +func autoConvert_core_ControllerDeployment_To_v1alpha1_ControllerDeployment(in *core.ControllerDeployment, out *ControllerDeployment, s conversion.Scope) error { + out.Type = in.Type + out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig)) + out.Policy = (*ControllerDeploymentPolicy)(unsafe.Pointer(in.Policy)) + out.SeedSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.SeedSelector)) + return nil +} + +// Convert_core_ControllerDeployment_To_v1alpha1_ControllerDeployment is an autogenerated conversion function. 
+func Convert_core_ControllerDeployment_To_v1alpha1_ControllerDeployment(in *core.ControllerDeployment, out *ControllerDeployment, s conversion.Scope) error { + return autoConvert_core_ControllerDeployment_To_v1alpha1_ControllerDeployment(in, out, s) +} + +func autoConvert_v1alpha1_ControllerInstallation_To_core_ControllerInstallation(in *ControllerInstallation, out *core.ControllerInstallation, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha1_ControllerInstallationSpec_To_core_ControllerInstallationSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1alpha1_ControllerInstallationStatus_To_core_ControllerInstallationStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha1_ControllerInstallation_To_core_ControllerInstallation is an autogenerated conversion function. +func Convert_v1alpha1_ControllerInstallation_To_core_ControllerInstallation(in *ControllerInstallation, out *core.ControllerInstallation, s conversion.Scope) error { + return autoConvert_v1alpha1_ControllerInstallation_To_core_ControllerInstallation(in, out, s) +} + +func autoConvert_core_ControllerInstallation_To_v1alpha1_ControllerInstallation(in *core.ControllerInstallation, out *ControllerInstallation, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_ControllerInstallationSpec_To_v1alpha1_ControllerInstallationSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_core_ControllerInstallationStatus_To_v1alpha1_ControllerInstallationStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_core_ControllerInstallation_To_v1alpha1_ControllerInstallation is an autogenerated conversion function. +func Convert_core_ControllerInstallation_To_v1alpha1_ControllerInstallation(in *core.ControllerInstallation, out *ControllerInstallation, s conversion.Scope) error { + return autoConvert_core_ControllerInstallation_To_v1alpha1_ControllerInstallation(in, out, s) +} + +func autoConvert_v1alpha1_ControllerInstallationList_To_core_ControllerInstallationList(in *ControllerInstallationList, out *core.ControllerInstallationList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]core.ControllerInstallation)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha1_ControllerInstallationList_To_core_ControllerInstallationList is an autogenerated conversion function. +func Convert_v1alpha1_ControllerInstallationList_To_core_ControllerInstallationList(in *ControllerInstallationList, out *core.ControllerInstallationList, s conversion.Scope) error { + return autoConvert_v1alpha1_ControllerInstallationList_To_core_ControllerInstallationList(in, out, s) +} + +func autoConvert_core_ControllerInstallationList_To_v1alpha1_ControllerInstallationList(in *core.ControllerInstallationList, out *ControllerInstallationList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]ControllerInstallation)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_core_ControllerInstallationList_To_v1alpha1_ControllerInstallationList is an autogenerated conversion function. 
+func Convert_core_ControllerInstallationList_To_v1alpha1_ControllerInstallationList(in *core.ControllerInstallationList, out *ControllerInstallationList, s conversion.Scope) error { + return autoConvert_core_ControllerInstallationList_To_v1alpha1_ControllerInstallationList(in, out, s) +} + +func autoConvert_v1alpha1_ControllerInstallationSpec_To_core_ControllerInstallationSpec(in *ControllerInstallationSpec, out *core.ControllerInstallationSpec, s conversion.Scope) error { + out.RegistrationRef = in.RegistrationRef + out.SeedRef = in.SeedRef + return nil +} + +// Convert_v1alpha1_ControllerInstallationSpec_To_core_ControllerInstallationSpec is an autogenerated conversion function. +func Convert_v1alpha1_ControllerInstallationSpec_To_core_ControllerInstallationSpec(in *ControllerInstallationSpec, out *core.ControllerInstallationSpec, s conversion.Scope) error { + return autoConvert_v1alpha1_ControllerInstallationSpec_To_core_ControllerInstallationSpec(in, out, s) +} + +func autoConvert_core_ControllerInstallationSpec_To_v1alpha1_ControllerInstallationSpec(in *core.ControllerInstallationSpec, out *ControllerInstallationSpec, s conversion.Scope) error { + out.RegistrationRef = in.RegistrationRef + out.SeedRef = in.SeedRef + return nil +} + +// Convert_core_ControllerInstallationSpec_To_v1alpha1_ControllerInstallationSpec is an autogenerated conversion function. +func Convert_core_ControllerInstallationSpec_To_v1alpha1_ControllerInstallationSpec(in *core.ControllerInstallationSpec, out *ControllerInstallationSpec, s conversion.Scope) error { + return autoConvert_core_ControllerInstallationSpec_To_v1alpha1_ControllerInstallationSpec(in, out, s) +} + +func autoConvert_v1alpha1_ControllerInstallationStatus_To_core_ControllerInstallationStatus(in *ControllerInstallationStatus, out *core.ControllerInstallationStatus, s conversion.Scope) error { + out.Conditions = *(*[]core.Condition)(unsafe.Pointer(&in.Conditions)) + out.ProviderStatus = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderStatus)) + return nil +} + +// Convert_v1alpha1_ControllerInstallationStatus_To_core_ControllerInstallationStatus is an autogenerated conversion function. +func Convert_v1alpha1_ControllerInstallationStatus_To_core_ControllerInstallationStatus(in *ControllerInstallationStatus, out *core.ControllerInstallationStatus, s conversion.Scope) error { + return autoConvert_v1alpha1_ControllerInstallationStatus_To_core_ControllerInstallationStatus(in, out, s) +} + +func autoConvert_core_ControllerInstallationStatus_To_v1alpha1_ControllerInstallationStatus(in *core.ControllerInstallationStatus, out *ControllerInstallationStatus, s conversion.Scope) error { + out.Conditions = *(*[]Condition)(unsafe.Pointer(&in.Conditions)) + out.ProviderStatus = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderStatus)) + return nil +} + +// Convert_core_ControllerInstallationStatus_To_v1alpha1_ControllerInstallationStatus is an autogenerated conversion function. 
+func Convert_core_ControllerInstallationStatus_To_v1alpha1_ControllerInstallationStatus(in *core.ControllerInstallationStatus, out *ControllerInstallationStatus, s conversion.Scope) error { + return autoConvert_core_ControllerInstallationStatus_To_v1alpha1_ControllerInstallationStatus(in, out, s) +} + +func autoConvert_v1alpha1_ControllerRegistration_To_core_ControllerRegistration(in *ControllerRegistration, out *core.ControllerRegistration, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha1_ControllerRegistrationSpec_To_core_ControllerRegistrationSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha1_ControllerRegistration_To_core_ControllerRegistration is an autogenerated conversion function. +func Convert_v1alpha1_ControllerRegistration_To_core_ControllerRegistration(in *ControllerRegistration, out *core.ControllerRegistration, s conversion.Scope) error { + return autoConvert_v1alpha1_ControllerRegistration_To_core_ControllerRegistration(in, out, s) +} + +func autoConvert_core_ControllerRegistration_To_v1alpha1_ControllerRegistration(in *core.ControllerRegistration, out *ControllerRegistration, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_ControllerRegistrationSpec_To_v1alpha1_ControllerRegistrationSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +// Convert_core_ControllerRegistration_To_v1alpha1_ControllerRegistration is an autogenerated conversion function. +func Convert_core_ControllerRegistration_To_v1alpha1_ControllerRegistration(in *core.ControllerRegistration, out *ControllerRegistration, s conversion.Scope) error { + return autoConvert_core_ControllerRegistration_To_v1alpha1_ControllerRegistration(in, out, s) +} + +func autoConvert_v1alpha1_ControllerRegistrationList_To_core_ControllerRegistrationList(in *ControllerRegistrationList, out *core.ControllerRegistrationList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]core.ControllerRegistration)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha1_ControllerRegistrationList_To_core_ControllerRegistrationList is an autogenerated conversion function. +func Convert_v1alpha1_ControllerRegistrationList_To_core_ControllerRegistrationList(in *ControllerRegistrationList, out *core.ControllerRegistrationList, s conversion.Scope) error { + return autoConvert_v1alpha1_ControllerRegistrationList_To_core_ControllerRegistrationList(in, out, s) +} + +func autoConvert_core_ControllerRegistrationList_To_v1alpha1_ControllerRegistrationList(in *core.ControllerRegistrationList, out *ControllerRegistrationList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]ControllerRegistration)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_core_ControllerRegistrationList_To_v1alpha1_ControllerRegistrationList is an autogenerated conversion function. 
+func Convert_core_ControllerRegistrationList_To_v1alpha1_ControllerRegistrationList(in *core.ControllerRegistrationList, out *ControllerRegistrationList, s conversion.Scope) error { + return autoConvert_core_ControllerRegistrationList_To_v1alpha1_ControllerRegistrationList(in, out, s) +} + +func autoConvert_v1alpha1_ControllerRegistrationSpec_To_core_ControllerRegistrationSpec(in *ControllerRegistrationSpec, out *core.ControllerRegistrationSpec, s conversion.Scope) error { + out.Resources = *(*[]core.ControllerResource)(unsafe.Pointer(&in.Resources)) + out.Deployment = (*core.ControllerDeployment)(unsafe.Pointer(in.Deployment)) + return nil +} + +// Convert_v1alpha1_ControllerRegistrationSpec_To_core_ControllerRegistrationSpec is an autogenerated conversion function. +func Convert_v1alpha1_ControllerRegistrationSpec_To_core_ControllerRegistrationSpec(in *ControllerRegistrationSpec, out *core.ControllerRegistrationSpec, s conversion.Scope) error { + return autoConvert_v1alpha1_ControllerRegistrationSpec_To_core_ControllerRegistrationSpec(in, out, s) +} + +func autoConvert_core_ControllerRegistrationSpec_To_v1alpha1_ControllerRegistrationSpec(in *core.ControllerRegistrationSpec, out *ControllerRegistrationSpec, s conversion.Scope) error { + out.Resources = *(*[]ControllerResource)(unsafe.Pointer(&in.Resources)) + out.Deployment = (*ControllerDeployment)(unsafe.Pointer(in.Deployment)) + return nil +} + +// Convert_core_ControllerRegistrationSpec_To_v1alpha1_ControllerRegistrationSpec is an autogenerated conversion function. +func Convert_core_ControllerRegistrationSpec_To_v1alpha1_ControllerRegistrationSpec(in *core.ControllerRegistrationSpec, out *ControllerRegistrationSpec, s conversion.Scope) error { + return autoConvert_core_ControllerRegistrationSpec_To_v1alpha1_ControllerRegistrationSpec(in, out, s) +} + +func autoConvert_v1alpha1_ControllerResource_To_core_ControllerResource(in *ControllerResource, out *core.ControllerResource, s conversion.Scope) error { + out.Kind = in.Kind + out.Type = in.Type + out.GloballyEnabled = (*bool)(unsafe.Pointer(in.GloballyEnabled)) + out.ReconcileTimeout = (*metav1.Duration)(unsafe.Pointer(in.ReconcileTimeout)) + out.Primary = (*bool)(unsafe.Pointer(in.Primary)) + return nil +} + +// Convert_v1alpha1_ControllerResource_To_core_ControllerResource is an autogenerated conversion function. +func Convert_v1alpha1_ControllerResource_To_core_ControllerResource(in *ControllerResource, out *core.ControllerResource, s conversion.Scope) error { + return autoConvert_v1alpha1_ControllerResource_To_core_ControllerResource(in, out, s) +} + +func autoConvert_core_ControllerResource_To_v1alpha1_ControllerResource(in *core.ControllerResource, out *ControllerResource, s conversion.Scope) error { + out.Kind = in.Kind + out.Type = in.Type + out.GloballyEnabled = (*bool)(unsafe.Pointer(in.GloballyEnabled)) + out.ReconcileTimeout = (*metav1.Duration)(unsafe.Pointer(in.ReconcileTimeout)) + out.Primary = (*bool)(unsafe.Pointer(in.Primary)) + return nil +} + +// Convert_core_ControllerResource_To_v1alpha1_ControllerResource is an autogenerated conversion function. 
+func Convert_core_ControllerResource_To_v1alpha1_ControllerResource(in *core.ControllerResource, out *ControllerResource, s conversion.Scope) error { + return autoConvert_core_ControllerResource_To_v1alpha1_ControllerResource(in, out, s) +} + +func autoConvert_v1alpha1_DNS_To_core_DNS(in *DNS, out *core.DNS, s conversion.Scope) error { + out.Domain = (*string)(unsafe.Pointer(in.Domain)) + out.Providers = *(*[]core.DNSProvider)(unsafe.Pointer(&in.Providers)) + return nil +} + +// Convert_v1alpha1_DNS_To_core_DNS is an autogenerated conversion function. +func Convert_v1alpha1_DNS_To_core_DNS(in *DNS, out *core.DNS, s conversion.Scope) error { + return autoConvert_v1alpha1_DNS_To_core_DNS(in, out, s) +} + +func autoConvert_core_DNS_To_v1alpha1_DNS(in *core.DNS, out *DNS, s conversion.Scope) error { + out.Domain = (*string)(unsafe.Pointer(in.Domain)) + out.Providers = *(*[]DNSProvider)(unsafe.Pointer(&in.Providers)) + return nil +} + +// Convert_core_DNS_To_v1alpha1_DNS is an autogenerated conversion function. +func Convert_core_DNS_To_v1alpha1_DNS(in *core.DNS, out *DNS, s conversion.Scope) error { + return autoConvert_core_DNS_To_v1alpha1_DNS(in, out, s) +} + +func autoConvert_v1alpha1_DNSIncludeExclude_To_core_DNSIncludeExclude(in *DNSIncludeExclude, out *core.DNSIncludeExclude, s conversion.Scope) error { + out.Include = *(*[]string)(unsafe.Pointer(&in.Include)) + out.Exclude = *(*[]string)(unsafe.Pointer(&in.Exclude)) + return nil +} + +// Convert_v1alpha1_DNSIncludeExclude_To_core_DNSIncludeExclude is an autogenerated conversion function. +func Convert_v1alpha1_DNSIncludeExclude_To_core_DNSIncludeExclude(in *DNSIncludeExclude, out *core.DNSIncludeExclude, s conversion.Scope) error { + return autoConvert_v1alpha1_DNSIncludeExclude_To_core_DNSIncludeExclude(in, out, s) +} + +func autoConvert_core_DNSIncludeExclude_To_v1alpha1_DNSIncludeExclude(in *core.DNSIncludeExclude, out *DNSIncludeExclude, s conversion.Scope) error { + out.Include = *(*[]string)(unsafe.Pointer(&in.Include)) + out.Exclude = *(*[]string)(unsafe.Pointer(&in.Exclude)) + return nil +} + +// Convert_core_DNSIncludeExclude_To_v1alpha1_DNSIncludeExclude is an autogenerated conversion function. +func Convert_core_DNSIncludeExclude_To_v1alpha1_DNSIncludeExclude(in *core.DNSIncludeExclude, out *DNSIncludeExclude, s conversion.Scope) error { + return autoConvert_core_DNSIncludeExclude_To_v1alpha1_DNSIncludeExclude(in, out, s) +} + +func autoConvert_v1alpha1_DNSProvider_To_core_DNSProvider(in *DNSProvider, out *core.DNSProvider, s conversion.Scope) error { + out.Domains = (*core.DNSIncludeExclude)(unsafe.Pointer(in.Domains)) + out.Primary = (*bool)(unsafe.Pointer(in.Primary)) + out.SecretName = (*string)(unsafe.Pointer(in.SecretName)) + out.Type = (*string)(unsafe.Pointer(in.Type)) + out.Zones = (*core.DNSIncludeExclude)(unsafe.Pointer(in.Zones)) + return nil +} + +// Convert_v1alpha1_DNSProvider_To_core_DNSProvider is an autogenerated conversion function. 
+func Convert_v1alpha1_DNSProvider_To_core_DNSProvider(in *DNSProvider, out *core.DNSProvider, s conversion.Scope) error { + return autoConvert_v1alpha1_DNSProvider_To_core_DNSProvider(in, out, s) +} + +func autoConvert_core_DNSProvider_To_v1alpha1_DNSProvider(in *core.DNSProvider, out *DNSProvider, s conversion.Scope) error { + out.Domains = (*DNSIncludeExclude)(unsafe.Pointer(in.Domains)) + out.Primary = (*bool)(unsafe.Pointer(in.Primary)) + out.SecretName = (*string)(unsafe.Pointer(in.SecretName)) + out.Type = (*string)(unsafe.Pointer(in.Type)) + out.Zones = (*DNSIncludeExclude)(unsafe.Pointer(in.Zones)) + return nil +} + +// Convert_core_DNSProvider_To_v1alpha1_DNSProvider is an autogenerated conversion function. +func Convert_core_DNSProvider_To_v1alpha1_DNSProvider(in *core.DNSProvider, out *DNSProvider, s conversion.Scope) error { + return autoConvert_core_DNSProvider_To_v1alpha1_DNSProvider(in, out, s) +} + +func autoConvert_v1alpha1_DataVolume_To_core_DataVolume(in *DataVolume, out *core.DataVolume, s conversion.Scope) error { + out.Name = in.Name + out.Type = (*string)(unsafe.Pointer(in.Type)) + out.VolumeSize = in.VolumeSize + out.Encrypted = (*bool)(unsafe.Pointer(in.Encrypted)) + return nil +} + +// Convert_v1alpha1_DataVolume_To_core_DataVolume is an autogenerated conversion function. +func Convert_v1alpha1_DataVolume_To_core_DataVolume(in *DataVolume, out *core.DataVolume, s conversion.Scope) error { + return autoConvert_v1alpha1_DataVolume_To_core_DataVolume(in, out, s) +} + +func autoConvert_core_DataVolume_To_v1alpha1_DataVolume(in *core.DataVolume, out *DataVolume, s conversion.Scope) error { + out.Name = in.Name + out.Type = (*string)(unsafe.Pointer(in.Type)) + out.VolumeSize = in.VolumeSize + out.Encrypted = (*bool)(unsafe.Pointer(in.Encrypted)) + return nil +} + +// Convert_core_DataVolume_To_v1alpha1_DataVolume is an autogenerated conversion function. +func Convert_core_DataVolume_To_v1alpha1_DataVolume(in *core.DataVolume, out *DataVolume, s conversion.Scope) error { + return autoConvert_core_DataVolume_To_v1alpha1_DataVolume(in, out, s) +} + +func autoConvert_v1alpha1_Endpoint_To_core_Endpoint(in *Endpoint, out *core.Endpoint, s conversion.Scope) error { + out.Name = in.Name + out.URL = in.URL + out.Purpose = in.Purpose + return nil +} + +// Convert_v1alpha1_Endpoint_To_core_Endpoint is an autogenerated conversion function. +func Convert_v1alpha1_Endpoint_To_core_Endpoint(in *Endpoint, out *core.Endpoint, s conversion.Scope) error { + return autoConvert_v1alpha1_Endpoint_To_core_Endpoint(in, out, s) +} + +func autoConvert_core_Endpoint_To_v1alpha1_Endpoint(in *core.Endpoint, out *Endpoint, s conversion.Scope) error { + out.Name = in.Name + out.URL = in.URL + out.Purpose = in.Purpose + return nil +} + +// Convert_core_Endpoint_To_v1alpha1_Endpoint is an autogenerated conversion function. +func Convert_core_Endpoint_To_v1alpha1_Endpoint(in *core.Endpoint, out *Endpoint, s conversion.Scope) error { + return autoConvert_core_Endpoint_To_v1alpha1_Endpoint(in, out, s) +} + +func autoConvert_v1alpha1_ExpirableVersion_To_core_ExpirableVersion(in *ExpirableVersion, out *core.ExpirableVersion, s conversion.Scope) error { + out.Version = in.Version + out.ExpirationDate = (*metav1.Time)(unsafe.Pointer(in.ExpirationDate)) + out.Classification = (*core.VersionClassification)(unsafe.Pointer(in.Classification)) + return nil +} + +// Convert_v1alpha1_ExpirableVersion_To_core_ExpirableVersion is an autogenerated conversion function. 
+func Convert_v1alpha1_ExpirableVersion_To_core_ExpirableVersion(in *ExpirableVersion, out *core.ExpirableVersion, s conversion.Scope) error { + return autoConvert_v1alpha1_ExpirableVersion_To_core_ExpirableVersion(in, out, s) +} + +func autoConvert_core_ExpirableVersion_To_v1alpha1_ExpirableVersion(in *core.ExpirableVersion, out *ExpirableVersion, s conversion.Scope) error { + out.Version = in.Version + out.ExpirationDate = (*metav1.Time)(unsafe.Pointer(in.ExpirationDate)) + out.Classification = (*VersionClassification)(unsafe.Pointer(in.Classification)) + return nil +} + +// Convert_core_ExpirableVersion_To_v1alpha1_ExpirableVersion is an autogenerated conversion function. +func Convert_core_ExpirableVersion_To_v1alpha1_ExpirableVersion(in *core.ExpirableVersion, out *ExpirableVersion, s conversion.Scope) error { + return autoConvert_core_ExpirableVersion_To_v1alpha1_ExpirableVersion(in, out, s) +} + +func autoConvert_v1alpha1_Extension_To_core_Extension(in *Extension, out *core.Extension, s conversion.Scope) error { + out.Type = in.Type + out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig)) + out.Disabled = (*bool)(unsafe.Pointer(in.Disabled)) + return nil +} + +// Convert_v1alpha1_Extension_To_core_Extension is an autogenerated conversion function. +func Convert_v1alpha1_Extension_To_core_Extension(in *Extension, out *core.Extension, s conversion.Scope) error { + return autoConvert_v1alpha1_Extension_To_core_Extension(in, out, s) +} + +func autoConvert_core_Extension_To_v1alpha1_Extension(in *core.Extension, out *Extension, s conversion.Scope) error { + out.Type = in.Type + out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig)) + out.Disabled = (*bool)(unsafe.Pointer(in.Disabled)) + return nil +} + +// Convert_core_Extension_To_v1alpha1_Extension is an autogenerated conversion function. +func Convert_core_Extension_To_v1alpha1_Extension(in *core.Extension, out *Extension, s conversion.Scope) error { + return autoConvert_core_Extension_To_v1alpha1_Extension(in, out, s) +} + +func autoConvert_v1alpha1_ExtensionResourceState_To_core_ExtensionResourceState(in *ExtensionResourceState, out *core.ExtensionResourceState, s conversion.Scope) error { + out.Kind = in.Kind + out.Name = (*string)(unsafe.Pointer(in.Name)) + out.Purpose = (*string)(unsafe.Pointer(in.Purpose)) + out.State = (*runtime.RawExtension)(unsafe.Pointer(in.State)) + out.Resources = *(*[]core.NamedResourceReference)(unsafe.Pointer(&in.Resources)) + return nil +} + +// Convert_v1alpha1_ExtensionResourceState_To_core_ExtensionResourceState is an autogenerated conversion function. +func Convert_v1alpha1_ExtensionResourceState_To_core_ExtensionResourceState(in *ExtensionResourceState, out *core.ExtensionResourceState, s conversion.Scope) error { + return autoConvert_v1alpha1_ExtensionResourceState_To_core_ExtensionResourceState(in, out, s) +} + +func autoConvert_core_ExtensionResourceState_To_v1alpha1_ExtensionResourceState(in *core.ExtensionResourceState, out *ExtensionResourceState, s conversion.Scope) error { + out.Kind = in.Kind + out.Name = (*string)(unsafe.Pointer(in.Name)) + out.Purpose = (*string)(unsafe.Pointer(in.Purpose)) + out.State = (*runtime.RawExtension)(unsafe.Pointer(in.State)) + out.Resources = *(*[]v1beta1.NamedResourceReference)(unsafe.Pointer(&in.Resources)) + return nil +} + +// Convert_core_ExtensionResourceState_To_v1alpha1_ExtensionResourceState is an autogenerated conversion function. 
+func Convert_core_ExtensionResourceState_To_v1alpha1_ExtensionResourceState(in *core.ExtensionResourceState, out *ExtensionResourceState, s conversion.Scope) error { + return autoConvert_core_ExtensionResourceState_To_v1alpha1_ExtensionResourceState(in, out, s) +} + +func autoConvert_v1alpha1_Gardener_To_core_Gardener(in *Gardener, out *core.Gardener, s conversion.Scope) error { + out.ID = in.ID + out.Name = in.Name + out.Version = in.Version + return nil +} + +// Convert_v1alpha1_Gardener_To_core_Gardener is an autogenerated conversion function. +func Convert_v1alpha1_Gardener_To_core_Gardener(in *Gardener, out *core.Gardener, s conversion.Scope) error { + return autoConvert_v1alpha1_Gardener_To_core_Gardener(in, out, s) +} + +func autoConvert_core_Gardener_To_v1alpha1_Gardener(in *core.Gardener, out *Gardener, s conversion.Scope) error { + out.ID = in.ID + out.Name = in.Name + out.Version = in.Version + return nil +} + +// Convert_core_Gardener_To_v1alpha1_Gardener is an autogenerated conversion function. +func Convert_core_Gardener_To_v1alpha1_Gardener(in *core.Gardener, out *Gardener, s conversion.Scope) error { + return autoConvert_core_Gardener_To_v1alpha1_Gardener(in, out, s) +} + +func autoConvert_v1alpha1_GardenerResourceData_To_core_GardenerResourceData(in *GardenerResourceData, out *core.GardenerResourceData, s conversion.Scope) error { + out.Name = in.Name + out.Type = in.Type + out.Data = in.Data + return nil +} + +// Convert_v1alpha1_GardenerResourceData_To_core_GardenerResourceData is an autogenerated conversion function. +func Convert_v1alpha1_GardenerResourceData_To_core_GardenerResourceData(in *GardenerResourceData, out *core.GardenerResourceData, s conversion.Scope) error { + return autoConvert_v1alpha1_GardenerResourceData_To_core_GardenerResourceData(in, out, s) +} + +func autoConvert_core_GardenerResourceData_To_v1alpha1_GardenerResourceData(in *core.GardenerResourceData, out *GardenerResourceData, s conversion.Scope) error { + out.Name = in.Name + out.Type = in.Type + out.Data = in.Data + return nil +} + +// Convert_core_GardenerResourceData_To_v1alpha1_GardenerResourceData is an autogenerated conversion function. +func Convert_core_GardenerResourceData_To_v1alpha1_GardenerResourceData(in *core.GardenerResourceData, out *GardenerResourceData, s conversion.Scope) error { + return autoConvert_core_GardenerResourceData_To_v1alpha1_GardenerResourceData(in, out, s) +} + +func autoConvert_v1alpha1_Hibernation_To_core_Hibernation(in *Hibernation, out *core.Hibernation, s conversion.Scope) error { + out.Enabled = (*bool)(unsafe.Pointer(in.Enabled)) + out.Schedules = *(*[]core.HibernationSchedule)(unsafe.Pointer(&in.Schedules)) + return nil +} + +// Convert_v1alpha1_Hibernation_To_core_Hibernation is an autogenerated conversion function. +func Convert_v1alpha1_Hibernation_To_core_Hibernation(in *Hibernation, out *core.Hibernation, s conversion.Scope) error { + return autoConvert_v1alpha1_Hibernation_To_core_Hibernation(in, out, s) +} + +func autoConvert_core_Hibernation_To_v1alpha1_Hibernation(in *core.Hibernation, out *Hibernation, s conversion.Scope) error { + out.Enabled = (*bool)(unsafe.Pointer(in.Enabled)) + out.Schedules = *(*[]HibernationSchedule)(unsafe.Pointer(&in.Schedules)) + return nil +} + +// Convert_core_Hibernation_To_v1alpha1_Hibernation is an autogenerated conversion function. 
+func Convert_core_Hibernation_To_v1alpha1_Hibernation(in *core.Hibernation, out *Hibernation, s conversion.Scope) error { + return autoConvert_core_Hibernation_To_v1alpha1_Hibernation(in, out, s) +} + +func autoConvert_v1alpha1_HibernationSchedule_To_core_HibernationSchedule(in *HibernationSchedule, out *core.HibernationSchedule, s conversion.Scope) error { + out.Start = (*string)(unsafe.Pointer(in.Start)) + out.End = (*string)(unsafe.Pointer(in.End)) + out.Location = (*string)(unsafe.Pointer(in.Location)) + return nil +} + +// Convert_v1alpha1_HibernationSchedule_To_core_HibernationSchedule is an autogenerated conversion function. +func Convert_v1alpha1_HibernationSchedule_To_core_HibernationSchedule(in *HibernationSchedule, out *core.HibernationSchedule, s conversion.Scope) error { + return autoConvert_v1alpha1_HibernationSchedule_To_core_HibernationSchedule(in, out, s) +} + +func autoConvert_core_HibernationSchedule_To_v1alpha1_HibernationSchedule(in *core.HibernationSchedule, out *HibernationSchedule, s conversion.Scope) error { + out.Start = (*string)(unsafe.Pointer(in.Start)) + out.End = (*string)(unsafe.Pointer(in.End)) + out.Location = (*string)(unsafe.Pointer(in.Location)) + return nil +} + +// Convert_core_HibernationSchedule_To_v1alpha1_HibernationSchedule is an autogenerated conversion function. +func Convert_core_HibernationSchedule_To_v1alpha1_HibernationSchedule(in *core.HibernationSchedule, out *HibernationSchedule, s conversion.Scope) error { + return autoConvert_core_HibernationSchedule_To_v1alpha1_HibernationSchedule(in, out, s) +} + +func autoConvert_v1alpha1_HorizontalPodAutoscalerConfig_To_core_HorizontalPodAutoscalerConfig(in *HorizontalPodAutoscalerConfig, out *core.HorizontalPodAutoscalerConfig, s conversion.Scope) error { + out.CPUInitializationPeriod = (*metav1.Duration)(unsafe.Pointer(in.CPUInitializationPeriod)) + out.DownscaleDelay = (*metav1.Duration)(unsafe.Pointer(in.DownscaleDelay)) + out.DownscaleStabilization = (*metav1.Duration)(unsafe.Pointer(in.DownscaleStabilization)) + out.InitialReadinessDelay = (*metav1.Duration)(unsafe.Pointer(in.InitialReadinessDelay)) + out.SyncPeriod = (*metav1.Duration)(unsafe.Pointer(in.SyncPeriod)) + out.Tolerance = (*float64)(unsafe.Pointer(in.Tolerance)) + out.UpscaleDelay = (*metav1.Duration)(unsafe.Pointer(in.UpscaleDelay)) + return nil +} + +// Convert_v1alpha1_HorizontalPodAutoscalerConfig_To_core_HorizontalPodAutoscalerConfig is an autogenerated conversion function. 
+func Convert_v1alpha1_HorizontalPodAutoscalerConfig_To_core_HorizontalPodAutoscalerConfig(in *HorizontalPodAutoscalerConfig, out *core.HorizontalPodAutoscalerConfig, s conversion.Scope) error { + return autoConvert_v1alpha1_HorizontalPodAutoscalerConfig_To_core_HorizontalPodAutoscalerConfig(in, out, s) +} + +func autoConvert_core_HorizontalPodAutoscalerConfig_To_v1alpha1_HorizontalPodAutoscalerConfig(in *core.HorizontalPodAutoscalerConfig, out *HorizontalPodAutoscalerConfig, s conversion.Scope) error { + out.CPUInitializationPeriod = (*metav1.Duration)(unsafe.Pointer(in.CPUInitializationPeriod)) + out.DownscaleDelay = (*metav1.Duration)(unsafe.Pointer(in.DownscaleDelay)) + out.DownscaleStabilization = (*metav1.Duration)(unsafe.Pointer(in.DownscaleStabilization)) + out.InitialReadinessDelay = (*metav1.Duration)(unsafe.Pointer(in.InitialReadinessDelay)) + out.SyncPeriod = (*metav1.Duration)(unsafe.Pointer(in.SyncPeriod)) + out.Tolerance = (*float64)(unsafe.Pointer(in.Tolerance)) + out.UpscaleDelay = (*metav1.Duration)(unsafe.Pointer(in.UpscaleDelay)) + return nil +} + +// Convert_core_HorizontalPodAutoscalerConfig_To_v1alpha1_HorizontalPodAutoscalerConfig is an autogenerated conversion function. +func Convert_core_HorizontalPodAutoscalerConfig_To_v1alpha1_HorizontalPodAutoscalerConfig(in *core.HorizontalPodAutoscalerConfig, out *HorizontalPodAutoscalerConfig, s conversion.Scope) error { + return autoConvert_core_HorizontalPodAutoscalerConfig_To_v1alpha1_HorizontalPodAutoscalerConfig(in, out, s) +} + +func autoConvert_v1alpha1_Ingress_To_core_Ingress(in *Ingress, out *core.Ingress, s conversion.Scope) error { + out.Domain = in.Domain + if err := Convert_v1alpha1_IngressController_To_core_IngressController(&in.Controller, &out.Controller, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha1_Ingress_To_core_Ingress is an autogenerated conversion function. +func Convert_v1alpha1_Ingress_To_core_Ingress(in *Ingress, out *core.Ingress, s conversion.Scope) error { + return autoConvert_v1alpha1_Ingress_To_core_Ingress(in, out, s) +} + +func autoConvert_core_Ingress_To_v1alpha1_Ingress(in *core.Ingress, out *Ingress, s conversion.Scope) error { + out.Domain = in.Domain + if err := Convert_core_IngressController_To_v1alpha1_IngressController(&in.Controller, &out.Controller, s); err != nil { + return err + } + return nil +} + +// Convert_core_Ingress_To_v1alpha1_Ingress is an autogenerated conversion function. +func Convert_core_Ingress_To_v1alpha1_Ingress(in *core.Ingress, out *Ingress, s conversion.Scope) error { + return autoConvert_core_Ingress_To_v1alpha1_Ingress(in, out, s) +} + +func autoConvert_v1alpha1_IngressController_To_core_IngressController(in *IngressController, out *core.IngressController, s conversion.Scope) error { + out.Kind = in.Kind + out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig)) + return nil +} + +// Convert_v1alpha1_IngressController_To_core_IngressController is an autogenerated conversion function. 
+func Convert_v1alpha1_IngressController_To_core_IngressController(in *IngressController, out *core.IngressController, s conversion.Scope) error { + return autoConvert_v1alpha1_IngressController_To_core_IngressController(in, out, s) +} + +func autoConvert_core_IngressController_To_v1alpha1_IngressController(in *core.IngressController, out *IngressController, s conversion.Scope) error { + out.Kind = in.Kind + out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig)) + return nil +} + +// Convert_core_IngressController_To_v1alpha1_IngressController is an autogenerated conversion function. +func Convert_core_IngressController_To_v1alpha1_IngressController(in *core.IngressController, out *IngressController, s conversion.Scope) error { + return autoConvert_core_IngressController_To_v1alpha1_IngressController(in, out, s) +} + +func autoConvert_v1alpha1_KubeAPIServerConfig_To_core_KubeAPIServerConfig(in *KubeAPIServerConfig, out *core.KubeAPIServerConfig, s conversion.Scope) error { + if err := Convert_v1alpha1_KubernetesConfig_To_core_KubernetesConfig(&in.KubernetesConfig, &out.KubernetesConfig, s); err != nil { + return err + } + out.AdmissionPlugins = *(*[]core.AdmissionPlugin)(unsafe.Pointer(&in.AdmissionPlugins)) + out.APIAudiences = *(*[]string)(unsafe.Pointer(&in.APIAudiences)) + out.AuditConfig = (*core.AuditConfig)(unsafe.Pointer(in.AuditConfig)) + out.EnableBasicAuthentication = (*bool)(unsafe.Pointer(in.EnableBasicAuthentication)) + out.OIDCConfig = (*core.OIDCConfig)(unsafe.Pointer(in.OIDCConfig)) + out.RuntimeConfig = *(*map[string]bool)(unsafe.Pointer(&in.RuntimeConfig)) + out.ServiceAccountConfig = (*core.ServiceAccountConfig)(unsafe.Pointer(in.ServiceAccountConfig)) + out.WatchCacheSizes = (*core.WatchCacheSizes)(unsafe.Pointer(in.WatchCacheSizes)) + out.Requests = (*core.KubeAPIServerRequests)(unsafe.Pointer(in.Requests)) + return nil +} + +// Convert_v1alpha1_KubeAPIServerConfig_To_core_KubeAPIServerConfig is an autogenerated conversion function. +func Convert_v1alpha1_KubeAPIServerConfig_To_core_KubeAPIServerConfig(in *KubeAPIServerConfig, out *core.KubeAPIServerConfig, s conversion.Scope) error { + return autoConvert_v1alpha1_KubeAPIServerConfig_To_core_KubeAPIServerConfig(in, out, s) +} + +func autoConvert_core_KubeAPIServerConfig_To_v1alpha1_KubeAPIServerConfig(in *core.KubeAPIServerConfig, out *KubeAPIServerConfig, s conversion.Scope) error { + if err := Convert_core_KubernetesConfig_To_v1alpha1_KubernetesConfig(&in.KubernetesConfig, &out.KubernetesConfig, s); err != nil { + return err + } + out.AdmissionPlugins = *(*[]AdmissionPlugin)(unsafe.Pointer(&in.AdmissionPlugins)) + out.APIAudiences = *(*[]string)(unsafe.Pointer(&in.APIAudiences)) + out.AuditConfig = (*AuditConfig)(unsafe.Pointer(in.AuditConfig)) + out.EnableBasicAuthentication = (*bool)(unsafe.Pointer(in.EnableBasicAuthentication)) + out.OIDCConfig = (*OIDCConfig)(unsafe.Pointer(in.OIDCConfig)) + out.RuntimeConfig = *(*map[string]bool)(unsafe.Pointer(&in.RuntimeConfig)) + out.ServiceAccountConfig = (*ServiceAccountConfig)(unsafe.Pointer(in.ServiceAccountConfig)) + out.WatchCacheSizes = (*WatchCacheSizes)(unsafe.Pointer(in.WatchCacheSizes)) + out.Requests = (*KubeAPIServerRequests)(unsafe.Pointer(in.Requests)) + return nil +} + +// Convert_core_KubeAPIServerConfig_To_v1alpha1_KubeAPIServerConfig is an autogenerated conversion function. 
+func Convert_core_KubeAPIServerConfig_To_v1alpha1_KubeAPIServerConfig(in *core.KubeAPIServerConfig, out *KubeAPIServerConfig, s conversion.Scope) error { + return autoConvert_core_KubeAPIServerConfig_To_v1alpha1_KubeAPIServerConfig(in, out, s) +} + +func autoConvert_v1alpha1_KubeAPIServerRequests_To_core_KubeAPIServerRequests(in *KubeAPIServerRequests, out *core.KubeAPIServerRequests, s conversion.Scope) error { + out.MaxNonMutatingInflight = (*int32)(unsafe.Pointer(in.MaxNonMutatingInflight)) + out.MaxMutatingInflight = (*int32)(unsafe.Pointer(in.MaxMutatingInflight)) + return nil +} + +// Convert_v1alpha1_KubeAPIServerRequests_To_core_KubeAPIServerRequests is an autogenerated conversion function. +func Convert_v1alpha1_KubeAPIServerRequests_To_core_KubeAPIServerRequests(in *KubeAPIServerRequests, out *core.KubeAPIServerRequests, s conversion.Scope) error { + return autoConvert_v1alpha1_KubeAPIServerRequests_To_core_KubeAPIServerRequests(in, out, s) +} + +func autoConvert_core_KubeAPIServerRequests_To_v1alpha1_KubeAPIServerRequests(in *core.KubeAPIServerRequests, out *KubeAPIServerRequests, s conversion.Scope) error { + out.MaxNonMutatingInflight = (*int32)(unsafe.Pointer(in.MaxNonMutatingInflight)) + out.MaxMutatingInflight = (*int32)(unsafe.Pointer(in.MaxMutatingInflight)) + return nil +} + +// Convert_core_KubeAPIServerRequests_To_v1alpha1_KubeAPIServerRequests is an autogenerated conversion function. +func Convert_core_KubeAPIServerRequests_To_v1alpha1_KubeAPIServerRequests(in *core.KubeAPIServerRequests, out *KubeAPIServerRequests, s conversion.Scope) error { + return autoConvert_core_KubeAPIServerRequests_To_v1alpha1_KubeAPIServerRequests(in, out, s) +} + +func autoConvert_v1alpha1_KubeControllerManagerConfig_To_core_KubeControllerManagerConfig(in *KubeControllerManagerConfig, out *core.KubeControllerManagerConfig, s conversion.Scope) error { + if err := Convert_v1alpha1_KubernetesConfig_To_core_KubernetesConfig(&in.KubernetesConfig, &out.KubernetesConfig, s); err != nil { + return err + } + out.HorizontalPodAutoscalerConfig = (*core.HorizontalPodAutoscalerConfig)(unsafe.Pointer(in.HorizontalPodAutoscalerConfig)) + out.NodeCIDRMaskSize = (*int32)(unsafe.Pointer(in.NodeCIDRMaskSize)) + out.PodEvictionTimeout = (*metav1.Duration)(unsafe.Pointer(in.PodEvictionTimeout)) + return nil +} + +// Convert_v1alpha1_KubeControllerManagerConfig_To_core_KubeControllerManagerConfig is an autogenerated conversion function. +func Convert_v1alpha1_KubeControllerManagerConfig_To_core_KubeControllerManagerConfig(in *KubeControllerManagerConfig, out *core.KubeControllerManagerConfig, s conversion.Scope) error { + return autoConvert_v1alpha1_KubeControllerManagerConfig_To_core_KubeControllerManagerConfig(in, out, s) +} + +func autoConvert_core_KubeControllerManagerConfig_To_v1alpha1_KubeControllerManagerConfig(in *core.KubeControllerManagerConfig, out *KubeControllerManagerConfig, s conversion.Scope) error { + if err := Convert_core_KubernetesConfig_To_v1alpha1_KubernetesConfig(&in.KubernetesConfig, &out.KubernetesConfig, s); err != nil { + return err + } + out.HorizontalPodAutoscalerConfig = (*HorizontalPodAutoscalerConfig)(unsafe.Pointer(in.HorizontalPodAutoscalerConfig)) + out.NodeCIDRMaskSize = (*int32)(unsafe.Pointer(in.NodeCIDRMaskSize)) + out.PodEvictionTimeout = (*metav1.Duration)(unsafe.Pointer(in.PodEvictionTimeout)) + return nil +} + +// Convert_core_KubeControllerManagerConfig_To_v1alpha1_KubeControllerManagerConfig is an autogenerated conversion function. 
+func Convert_core_KubeControllerManagerConfig_To_v1alpha1_KubeControllerManagerConfig(in *core.KubeControllerManagerConfig, out *KubeControllerManagerConfig, s conversion.Scope) error { + return autoConvert_core_KubeControllerManagerConfig_To_v1alpha1_KubeControllerManagerConfig(in, out, s) +} + +func autoConvert_v1alpha1_KubeProxyConfig_To_core_KubeProxyConfig(in *KubeProxyConfig, out *core.KubeProxyConfig, s conversion.Scope) error { + if err := Convert_v1alpha1_KubernetesConfig_To_core_KubernetesConfig(&in.KubernetesConfig, &out.KubernetesConfig, s); err != nil { + return err + } + out.Mode = (*core.ProxyMode)(unsafe.Pointer(in.Mode)) + return nil +} + +// Convert_v1alpha1_KubeProxyConfig_To_core_KubeProxyConfig is an autogenerated conversion function. +func Convert_v1alpha1_KubeProxyConfig_To_core_KubeProxyConfig(in *KubeProxyConfig, out *core.KubeProxyConfig, s conversion.Scope) error { + return autoConvert_v1alpha1_KubeProxyConfig_To_core_KubeProxyConfig(in, out, s) +} + +func autoConvert_core_KubeProxyConfig_To_v1alpha1_KubeProxyConfig(in *core.KubeProxyConfig, out *KubeProxyConfig, s conversion.Scope) error { + if err := Convert_core_KubernetesConfig_To_v1alpha1_KubernetesConfig(&in.KubernetesConfig, &out.KubernetesConfig, s); err != nil { + return err + } + out.Mode = (*ProxyMode)(unsafe.Pointer(in.Mode)) + return nil +} + +// Convert_core_KubeProxyConfig_To_v1alpha1_KubeProxyConfig is an autogenerated conversion function. +func Convert_core_KubeProxyConfig_To_v1alpha1_KubeProxyConfig(in *core.KubeProxyConfig, out *KubeProxyConfig, s conversion.Scope) error { + return autoConvert_core_KubeProxyConfig_To_v1alpha1_KubeProxyConfig(in, out, s) +} + +func autoConvert_v1alpha1_KubeSchedulerConfig_To_core_KubeSchedulerConfig(in *KubeSchedulerConfig, out *core.KubeSchedulerConfig, s conversion.Scope) error { + if err := Convert_v1alpha1_KubernetesConfig_To_core_KubernetesConfig(&in.KubernetesConfig, &out.KubernetesConfig, s); err != nil { + return err + } + out.KubeMaxPDVols = (*string)(unsafe.Pointer(in.KubeMaxPDVols)) + return nil +} + +// Convert_v1alpha1_KubeSchedulerConfig_To_core_KubeSchedulerConfig is an autogenerated conversion function. +func Convert_v1alpha1_KubeSchedulerConfig_To_core_KubeSchedulerConfig(in *KubeSchedulerConfig, out *core.KubeSchedulerConfig, s conversion.Scope) error { + return autoConvert_v1alpha1_KubeSchedulerConfig_To_core_KubeSchedulerConfig(in, out, s) +} + +func autoConvert_core_KubeSchedulerConfig_To_v1alpha1_KubeSchedulerConfig(in *core.KubeSchedulerConfig, out *KubeSchedulerConfig, s conversion.Scope) error { + if err := Convert_core_KubernetesConfig_To_v1alpha1_KubernetesConfig(&in.KubernetesConfig, &out.KubernetesConfig, s); err != nil { + return err + } + out.KubeMaxPDVols = (*string)(unsafe.Pointer(in.KubeMaxPDVols)) + return nil +} + +// Convert_core_KubeSchedulerConfig_To_v1alpha1_KubeSchedulerConfig is an autogenerated conversion function. 
+func Convert_core_KubeSchedulerConfig_To_v1alpha1_KubeSchedulerConfig(in *core.KubeSchedulerConfig, out *KubeSchedulerConfig, s conversion.Scope) error { + return autoConvert_core_KubeSchedulerConfig_To_v1alpha1_KubeSchedulerConfig(in, out, s) +} + +func autoConvert_v1alpha1_KubeletConfig_To_core_KubeletConfig(in *KubeletConfig, out *core.KubeletConfig, s conversion.Scope) error { + if err := Convert_v1alpha1_KubernetesConfig_To_core_KubernetesConfig(&in.KubernetesConfig, &out.KubernetesConfig, s); err != nil { + return err + } + out.CPUCFSQuota = (*bool)(unsafe.Pointer(in.CPUCFSQuota)) + out.CPUManagerPolicy = (*string)(unsafe.Pointer(in.CPUManagerPolicy)) + out.EvictionHard = (*core.KubeletConfigEviction)(unsafe.Pointer(in.EvictionHard)) + out.EvictionMaxPodGracePeriod = (*int32)(unsafe.Pointer(in.EvictionMaxPodGracePeriod)) + out.EvictionMinimumReclaim = (*core.KubeletConfigEvictionMinimumReclaim)(unsafe.Pointer(in.EvictionMinimumReclaim)) + out.EvictionPressureTransitionPeriod = (*metav1.Duration)(unsafe.Pointer(in.EvictionPressureTransitionPeriod)) + out.EvictionSoft = (*core.KubeletConfigEviction)(unsafe.Pointer(in.EvictionSoft)) + out.EvictionSoftGracePeriod = (*core.KubeletConfigEvictionSoftGracePeriod)(unsafe.Pointer(in.EvictionSoftGracePeriod)) + out.MaxPods = (*int32)(unsafe.Pointer(in.MaxPods)) + out.PodPIDsLimit = (*int64)(unsafe.Pointer(in.PodPIDsLimit)) + out.ImagePullProgressDeadline = (*metav1.Duration)(unsafe.Pointer(in.ImagePullProgressDeadline)) + out.FailSwapOn = (*bool)(unsafe.Pointer(in.FailSwapOn)) + out.KubeReserved = (*core.KubeletConfigReserved)(unsafe.Pointer(in.KubeReserved)) + out.SystemReserved = (*core.KubeletConfigReserved)(unsafe.Pointer(in.SystemReserved)) + return nil +} + +// Convert_v1alpha1_KubeletConfig_To_core_KubeletConfig is an autogenerated conversion function. 
+func Convert_v1alpha1_KubeletConfig_To_core_KubeletConfig(in *KubeletConfig, out *core.KubeletConfig, s conversion.Scope) error { + return autoConvert_v1alpha1_KubeletConfig_To_core_KubeletConfig(in, out, s) +} + +func autoConvert_core_KubeletConfig_To_v1alpha1_KubeletConfig(in *core.KubeletConfig, out *KubeletConfig, s conversion.Scope) error { + if err := Convert_core_KubernetesConfig_To_v1alpha1_KubernetesConfig(&in.KubernetesConfig, &out.KubernetesConfig, s); err != nil { + return err + } + out.CPUCFSQuota = (*bool)(unsafe.Pointer(in.CPUCFSQuota)) + out.CPUManagerPolicy = (*string)(unsafe.Pointer(in.CPUManagerPolicy)) + out.EvictionHard = (*KubeletConfigEviction)(unsafe.Pointer(in.EvictionHard)) + out.EvictionMaxPodGracePeriod = (*int32)(unsafe.Pointer(in.EvictionMaxPodGracePeriod)) + out.EvictionMinimumReclaim = (*KubeletConfigEvictionMinimumReclaim)(unsafe.Pointer(in.EvictionMinimumReclaim)) + out.EvictionPressureTransitionPeriod = (*metav1.Duration)(unsafe.Pointer(in.EvictionPressureTransitionPeriod)) + out.EvictionSoft = (*KubeletConfigEviction)(unsafe.Pointer(in.EvictionSoft)) + out.EvictionSoftGracePeriod = (*KubeletConfigEvictionSoftGracePeriod)(unsafe.Pointer(in.EvictionSoftGracePeriod)) + out.MaxPods = (*int32)(unsafe.Pointer(in.MaxPods)) + out.PodPIDsLimit = (*int64)(unsafe.Pointer(in.PodPIDsLimit)) + out.ImagePullProgressDeadline = (*metav1.Duration)(unsafe.Pointer(in.ImagePullProgressDeadline)) + out.FailSwapOn = (*bool)(unsafe.Pointer(in.FailSwapOn)) + out.KubeReserved = (*KubeletConfigReserved)(unsafe.Pointer(in.KubeReserved)) + out.SystemReserved = (*KubeletConfigReserved)(unsafe.Pointer(in.SystemReserved)) + return nil +} + +// Convert_core_KubeletConfig_To_v1alpha1_KubeletConfig is an autogenerated conversion function. +func Convert_core_KubeletConfig_To_v1alpha1_KubeletConfig(in *core.KubeletConfig, out *KubeletConfig, s conversion.Scope) error { + return autoConvert_core_KubeletConfig_To_v1alpha1_KubeletConfig(in, out, s) +} + +func autoConvert_v1alpha1_KubeletConfigEviction_To_core_KubeletConfigEviction(in *KubeletConfigEviction, out *core.KubeletConfigEviction, s conversion.Scope) error { + out.MemoryAvailable = (*string)(unsafe.Pointer(in.MemoryAvailable)) + out.ImageFSAvailable = (*string)(unsafe.Pointer(in.ImageFSAvailable)) + out.ImageFSInodesFree = (*string)(unsafe.Pointer(in.ImageFSInodesFree)) + out.NodeFSAvailable = (*string)(unsafe.Pointer(in.NodeFSAvailable)) + out.NodeFSInodesFree = (*string)(unsafe.Pointer(in.NodeFSInodesFree)) + return nil +} + +// Convert_v1alpha1_KubeletConfigEviction_To_core_KubeletConfigEviction is an autogenerated conversion function. 
+func Convert_v1alpha1_KubeletConfigEviction_To_core_KubeletConfigEviction(in *KubeletConfigEviction, out *core.KubeletConfigEviction, s conversion.Scope) error { + return autoConvert_v1alpha1_KubeletConfigEviction_To_core_KubeletConfigEviction(in, out, s) +} + +func autoConvert_core_KubeletConfigEviction_To_v1alpha1_KubeletConfigEviction(in *core.KubeletConfigEviction, out *KubeletConfigEviction, s conversion.Scope) error { + out.MemoryAvailable = (*string)(unsafe.Pointer(in.MemoryAvailable)) + out.ImageFSAvailable = (*string)(unsafe.Pointer(in.ImageFSAvailable)) + out.ImageFSInodesFree = (*string)(unsafe.Pointer(in.ImageFSInodesFree)) + out.NodeFSAvailable = (*string)(unsafe.Pointer(in.NodeFSAvailable)) + out.NodeFSInodesFree = (*string)(unsafe.Pointer(in.NodeFSInodesFree)) + return nil +} + +// Convert_core_KubeletConfigEviction_To_v1alpha1_KubeletConfigEviction is an autogenerated conversion function. +func Convert_core_KubeletConfigEviction_To_v1alpha1_KubeletConfigEviction(in *core.KubeletConfigEviction, out *KubeletConfigEviction, s conversion.Scope) error { + return autoConvert_core_KubeletConfigEviction_To_v1alpha1_KubeletConfigEviction(in, out, s) +} + +func autoConvert_v1alpha1_KubeletConfigEvictionMinimumReclaim_To_core_KubeletConfigEvictionMinimumReclaim(in *KubeletConfigEvictionMinimumReclaim, out *core.KubeletConfigEvictionMinimumReclaim, s conversion.Scope) error { + out.MemoryAvailable = (*resource.Quantity)(unsafe.Pointer(in.MemoryAvailable)) + out.ImageFSAvailable = (*resource.Quantity)(unsafe.Pointer(in.ImageFSAvailable)) + out.ImageFSInodesFree = (*resource.Quantity)(unsafe.Pointer(in.ImageFSInodesFree)) + out.NodeFSAvailable = (*resource.Quantity)(unsafe.Pointer(in.NodeFSAvailable)) + out.NodeFSInodesFree = (*resource.Quantity)(unsafe.Pointer(in.NodeFSInodesFree)) + return nil +} + +// Convert_v1alpha1_KubeletConfigEvictionMinimumReclaim_To_core_KubeletConfigEvictionMinimumReclaim is an autogenerated conversion function. +func Convert_v1alpha1_KubeletConfigEvictionMinimumReclaim_To_core_KubeletConfigEvictionMinimumReclaim(in *KubeletConfigEvictionMinimumReclaim, out *core.KubeletConfigEvictionMinimumReclaim, s conversion.Scope) error { + return autoConvert_v1alpha1_KubeletConfigEvictionMinimumReclaim_To_core_KubeletConfigEvictionMinimumReclaim(in, out, s) +} + +func autoConvert_core_KubeletConfigEvictionMinimumReclaim_To_v1alpha1_KubeletConfigEvictionMinimumReclaim(in *core.KubeletConfigEvictionMinimumReclaim, out *KubeletConfigEvictionMinimumReclaim, s conversion.Scope) error { + out.MemoryAvailable = (*resource.Quantity)(unsafe.Pointer(in.MemoryAvailable)) + out.ImageFSAvailable = (*resource.Quantity)(unsafe.Pointer(in.ImageFSAvailable)) + out.ImageFSInodesFree = (*resource.Quantity)(unsafe.Pointer(in.ImageFSInodesFree)) + out.NodeFSAvailable = (*resource.Quantity)(unsafe.Pointer(in.NodeFSAvailable)) + out.NodeFSInodesFree = (*resource.Quantity)(unsafe.Pointer(in.NodeFSInodesFree)) + return nil +} + +// Convert_core_KubeletConfigEvictionMinimumReclaim_To_v1alpha1_KubeletConfigEvictionMinimumReclaim is an autogenerated conversion function. 
+func Convert_core_KubeletConfigEvictionMinimumReclaim_To_v1alpha1_KubeletConfigEvictionMinimumReclaim(in *core.KubeletConfigEvictionMinimumReclaim, out *KubeletConfigEvictionMinimumReclaim, s conversion.Scope) error { + return autoConvert_core_KubeletConfigEvictionMinimumReclaim_To_v1alpha1_KubeletConfigEvictionMinimumReclaim(in, out, s) +} + +func autoConvert_v1alpha1_KubeletConfigEvictionSoftGracePeriod_To_core_KubeletConfigEvictionSoftGracePeriod(in *KubeletConfigEvictionSoftGracePeriod, out *core.KubeletConfigEvictionSoftGracePeriod, s conversion.Scope) error { + out.MemoryAvailable = (*metav1.Duration)(unsafe.Pointer(in.MemoryAvailable)) + out.ImageFSAvailable = (*metav1.Duration)(unsafe.Pointer(in.ImageFSAvailable)) + out.ImageFSInodesFree = (*metav1.Duration)(unsafe.Pointer(in.ImageFSInodesFree)) + out.NodeFSAvailable = (*metav1.Duration)(unsafe.Pointer(in.NodeFSAvailable)) + out.NodeFSInodesFree = (*metav1.Duration)(unsafe.Pointer(in.NodeFSInodesFree)) + return nil +} + +// Convert_v1alpha1_KubeletConfigEvictionSoftGracePeriod_To_core_KubeletConfigEvictionSoftGracePeriod is an autogenerated conversion function. +func Convert_v1alpha1_KubeletConfigEvictionSoftGracePeriod_To_core_KubeletConfigEvictionSoftGracePeriod(in *KubeletConfigEvictionSoftGracePeriod, out *core.KubeletConfigEvictionSoftGracePeriod, s conversion.Scope) error { + return autoConvert_v1alpha1_KubeletConfigEvictionSoftGracePeriod_To_core_KubeletConfigEvictionSoftGracePeriod(in, out, s) +} + +func autoConvert_core_KubeletConfigEvictionSoftGracePeriod_To_v1alpha1_KubeletConfigEvictionSoftGracePeriod(in *core.KubeletConfigEvictionSoftGracePeriod, out *KubeletConfigEvictionSoftGracePeriod, s conversion.Scope) error { + out.MemoryAvailable = (*metav1.Duration)(unsafe.Pointer(in.MemoryAvailable)) + out.ImageFSAvailable = (*metav1.Duration)(unsafe.Pointer(in.ImageFSAvailable)) + out.ImageFSInodesFree = (*metav1.Duration)(unsafe.Pointer(in.ImageFSInodesFree)) + out.NodeFSAvailable = (*metav1.Duration)(unsafe.Pointer(in.NodeFSAvailable)) + out.NodeFSInodesFree = (*metav1.Duration)(unsafe.Pointer(in.NodeFSInodesFree)) + return nil +} + +// Convert_core_KubeletConfigEvictionSoftGracePeriod_To_v1alpha1_KubeletConfigEvictionSoftGracePeriod is an autogenerated conversion function. +func Convert_core_KubeletConfigEvictionSoftGracePeriod_To_v1alpha1_KubeletConfigEvictionSoftGracePeriod(in *core.KubeletConfigEvictionSoftGracePeriod, out *KubeletConfigEvictionSoftGracePeriod, s conversion.Scope) error { + return autoConvert_core_KubeletConfigEvictionSoftGracePeriod_To_v1alpha1_KubeletConfigEvictionSoftGracePeriod(in, out, s) +} + +func autoConvert_v1alpha1_KubeletConfigReserved_To_core_KubeletConfigReserved(in *KubeletConfigReserved, out *core.KubeletConfigReserved, s conversion.Scope) error { + out.CPU = (*resource.Quantity)(unsafe.Pointer(in.CPU)) + out.Memory = (*resource.Quantity)(unsafe.Pointer(in.Memory)) + out.EphemeralStorage = (*resource.Quantity)(unsafe.Pointer(in.EphemeralStorage)) + out.PID = (*resource.Quantity)(unsafe.Pointer(in.PID)) + return nil +} + +// Convert_v1alpha1_KubeletConfigReserved_To_core_KubeletConfigReserved is an autogenerated conversion function. 
+func Convert_v1alpha1_KubeletConfigReserved_To_core_KubeletConfigReserved(in *KubeletConfigReserved, out *core.KubeletConfigReserved, s conversion.Scope) error { + return autoConvert_v1alpha1_KubeletConfigReserved_To_core_KubeletConfigReserved(in, out, s) +} + +func autoConvert_core_KubeletConfigReserved_To_v1alpha1_KubeletConfigReserved(in *core.KubeletConfigReserved, out *KubeletConfigReserved, s conversion.Scope) error { + out.CPU = (*resource.Quantity)(unsafe.Pointer(in.CPU)) + out.Memory = (*resource.Quantity)(unsafe.Pointer(in.Memory)) + out.EphemeralStorage = (*resource.Quantity)(unsafe.Pointer(in.EphemeralStorage)) + out.PID = (*resource.Quantity)(unsafe.Pointer(in.PID)) + return nil +} + +// Convert_core_KubeletConfigReserved_To_v1alpha1_KubeletConfigReserved is an autogenerated conversion function. +func Convert_core_KubeletConfigReserved_To_v1alpha1_KubeletConfigReserved(in *core.KubeletConfigReserved, out *KubeletConfigReserved, s conversion.Scope) error { + return autoConvert_core_KubeletConfigReserved_To_v1alpha1_KubeletConfigReserved(in, out, s) +} + +func autoConvert_v1alpha1_Kubernetes_To_core_Kubernetes(in *Kubernetes, out *core.Kubernetes, s conversion.Scope) error { + out.AllowPrivilegedContainers = (*bool)(unsafe.Pointer(in.AllowPrivilegedContainers)) + out.ClusterAutoscaler = (*core.ClusterAutoscaler)(unsafe.Pointer(in.ClusterAutoscaler)) + out.KubeAPIServer = (*core.KubeAPIServerConfig)(unsafe.Pointer(in.KubeAPIServer)) + out.KubeControllerManager = (*core.KubeControllerManagerConfig)(unsafe.Pointer(in.KubeControllerManager)) + out.KubeScheduler = (*core.KubeSchedulerConfig)(unsafe.Pointer(in.KubeScheduler)) + out.KubeProxy = (*core.KubeProxyConfig)(unsafe.Pointer(in.KubeProxy)) + out.Kubelet = (*core.KubeletConfig)(unsafe.Pointer(in.Kubelet)) + out.Version = in.Version + out.VerticalPodAutoscaler = (*core.VerticalPodAutoscaler)(unsafe.Pointer(in.VerticalPodAutoscaler)) + return nil +} + +// Convert_v1alpha1_Kubernetes_To_core_Kubernetes is an autogenerated conversion function. +func Convert_v1alpha1_Kubernetes_To_core_Kubernetes(in *Kubernetes, out *core.Kubernetes, s conversion.Scope) error { + return autoConvert_v1alpha1_Kubernetes_To_core_Kubernetes(in, out, s) +} + +func autoConvert_core_Kubernetes_To_v1alpha1_Kubernetes(in *core.Kubernetes, out *Kubernetes, s conversion.Scope) error { + out.AllowPrivilegedContainers = (*bool)(unsafe.Pointer(in.AllowPrivilegedContainers)) + out.ClusterAutoscaler = (*ClusterAutoscaler)(unsafe.Pointer(in.ClusterAutoscaler)) + out.KubeAPIServer = (*KubeAPIServerConfig)(unsafe.Pointer(in.KubeAPIServer)) + out.KubeControllerManager = (*KubeControllerManagerConfig)(unsafe.Pointer(in.KubeControllerManager)) + out.KubeScheduler = (*KubeSchedulerConfig)(unsafe.Pointer(in.KubeScheduler)) + out.KubeProxy = (*KubeProxyConfig)(unsafe.Pointer(in.KubeProxy)) + out.Kubelet = (*KubeletConfig)(unsafe.Pointer(in.Kubelet)) + out.Version = in.Version + out.VerticalPodAutoscaler = (*VerticalPodAutoscaler)(unsafe.Pointer(in.VerticalPodAutoscaler)) + return nil +} + +// Convert_core_Kubernetes_To_v1alpha1_Kubernetes is an autogenerated conversion function. 
+func Convert_core_Kubernetes_To_v1alpha1_Kubernetes(in *core.Kubernetes, out *Kubernetes, s conversion.Scope) error { + return autoConvert_core_Kubernetes_To_v1alpha1_Kubernetes(in, out, s) +} + +func autoConvert_v1alpha1_KubernetesConfig_To_core_KubernetesConfig(in *KubernetesConfig, out *core.KubernetesConfig, s conversion.Scope) error { + out.FeatureGates = *(*map[string]bool)(unsafe.Pointer(&in.FeatureGates)) + return nil +} + +// Convert_v1alpha1_KubernetesConfig_To_core_KubernetesConfig is an autogenerated conversion function. +func Convert_v1alpha1_KubernetesConfig_To_core_KubernetesConfig(in *KubernetesConfig, out *core.KubernetesConfig, s conversion.Scope) error { + return autoConvert_v1alpha1_KubernetesConfig_To_core_KubernetesConfig(in, out, s) +} + +func autoConvert_core_KubernetesConfig_To_v1alpha1_KubernetesConfig(in *core.KubernetesConfig, out *KubernetesConfig, s conversion.Scope) error { + out.FeatureGates = *(*map[string]bool)(unsafe.Pointer(&in.FeatureGates)) + return nil +} + +// Convert_core_KubernetesConfig_To_v1alpha1_KubernetesConfig is an autogenerated conversion function. +func Convert_core_KubernetesConfig_To_v1alpha1_KubernetesConfig(in *core.KubernetesConfig, out *KubernetesConfig, s conversion.Scope) error { + return autoConvert_core_KubernetesConfig_To_v1alpha1_KubernetesConfig(in, out, s) +} + +func autoConvert_v1alpha1_KubernetesDashboard_To_core_KubernetesDashboard(in *KubernetesDashboard, out *core.KubernetesDashboard, s conversion.Scope) error { + if err := Convert_v1alpha1_Addon_To_core_Addon(&in.Addon, &out.Addon, s); err != nil { + return err + } + out.AuthenticationMode = (*string)(unsafe.Pointer(in.AuthenticationMode)) + return nil +} + +// Convert_v1alpha1_KubernetesDashboard_To_core_KubernetesDashboard is an autogenerated conversion function. +func Convert_v1alpha1_KubernetesDashboard_To_core_KubernetesDashboard(in *KubernetesDashboard, out *core.KubernetesDashboard, s conversion.Scope) error { + return autoConvert_v1alpha1_KubernetesDashboard_To_core_KubernetesDashboard(in, out, s) +} + +func autoConvert_core_KubernetesDashboard_To_v1alpha1_KubernetesDashboard(in *core.KubernetesDashboard, out *KubernetesDashboard, s conversion.Scope) error { + if err := Convert_core_Addon_To_v1alpha1_Addon(&in.Addon, &out.Addon, s); err != nil { + return err + } + out.AuthenticationMode = (*string)(unsafe.Pointer(in.AuthenticationMode)) + return nil +} + +// Convert_core_KubernetesDashboard_To_v1alpha1_KubernetesDashboard is an autogenerated conversion function. +func Convert_core_KubernetesDashboard_To_v1alpha1_KubernetesDashboard(in *core.KubernetesDashboard, out *KubernetesDashboard, s conversion.Scope) error { + return autoConvert_core_KubernetesDashboard_To_v1alpha1_KubernetesDashboard(in, out, s) +} + +func autoConvert_v1alpha1_KubernetesInfo_To_core_KubernetesInfo(in *KubernetesInfo, out *core.KubernetesInfo, s conversion.Scope) error { + out.Version = in.Version + return nil +} + +// Convert_v1alpha1_KubernetesInfo_To_core_KubernetesInfo is an autogenerated conversion function. 
+func Convert_v1alpha1_KubernetesInfo_To_core_KubernetesInfo(in *KubernetesInfo, out *core.KubernetesInfo, s conversion.Scope) error { + return autoConvert_v1alpha1_KubernetesInfo_To_core_KubernetesInfo(in, out, s) +} + +func autoConvert_core_KubernetesInfo_To_v1alpha1_KubernetesInfo(in *core.KubernetesInfo, out *KubernetesInfo, s conversion.Scope) error { + out.Version = in.Version + return nil +} + +// Convert_core_KubernetesInfo_To_v1alpha1_KubernetesInfo is an autogenerated conversion function. +func Convert_core_KubernetesInfo_To_v1alpha1_KubernetesInfo(in *core.KubernetesInfo, out *KubernetesInfo, s conversion.Scope) error { + return autoConvert_core_KubernetesInfo_To_v1alpha1_KubernetesInfo(in, out, s) +} + +func autoConvert_v1alpha1_KubernetesSettings_To_core_KubernetesSettings(in *KubernetesSettings, out *core.KubernetesSettings, s conversion.Scope) error { + out.Versions = *(*[]core.ExpirableVersion)(unsafe.Pointer(&in.Versions)) + return nil +} + +// Convert_v1alpha1_KubernetesSettings_To_core_KubernetesSettings is an autogenerated conversion function. +func Convert_v1alpha1_KubernetesSettings_To_core_KubernetesSettings(in *KubernetesSettings, out *core.KubernetesSettings, s conversion.Scope) error { + return autoConvert_v1alpha1_KubernetesSettings_To_core_KubernetesSettings(in, out, s) +} + +func autoConvert_core_KubernetesSettings_To_v1alpha1_KubernetesSettings(in *core.KubernetesSettings, out *KubernetesSettings, s conversion.Scope) error { + out.Versions = *(*[]ExpirableVersion)(unsafe.Pointer(&in.Versions)) + return nil +} + +// Convert_core_KubernetesSettings_To_v1alpha1_KubernetesSettings is an autogenerated conversion function. +func Convert_core_KubernetesSettings_To_v1alpha1_KubernetesSettings(in *core.KubernetesSettings, out *KubernetesSettings, s conversion.Scope) error { + return autoConvert_core_KubernetesSettings_To_v1alpha1_KubernetesSettings(in, out, s) +} + +func autoConvert_v1alpha1_LastError_To_core_LastError(in *LastError, out *core.LastError, s conversion.Scope) error { + out.Description = in.Description + out.TaskID = (*string)(unsafe.Pointer(in.TaskID)) + out.Codes = *(*[]core.ErrorCode)(unsafe.Pointer(&in.Codes)) + out.LastUpdateTime = (*metav1.Time)(unsafe.Pointer(in.LastUpdateTime)) + return nil +} + +// Convert_v1alpha1_LastError_To_core_LastError is an autogenerated conversion function. +func Convert_v1alpha1_LastError_To_core_LastError(in *LastError, out *core.LastError, s conversion.Scope) error { + return autoConvert_v1alpha1_LastError_To_core_LastError(in, out, s) +} + +func autoConvert_core_LastError_To_v1alpha1_LastError(in *core.LastError, out *LastError, s conversion.Scope) error { + out.Description = in.Description + out.TaskID = (*string)(unsafe.Pointer(in.TaskID)) + out.Codes = *(*[]ErrorCode)(unsafe.Pointer(&in.Codes)) + out.LastUpdateTime = (*metav1.Time)(unsafe.Pointer(in.LastUpdateTime)) + return nil +} + +// Convert_core_LastError_To_v1alpha1_LastError is an autogenerated conversion function. 
+func Convert_core_LastError_To_v1alpha1_LastError(in *core.LastError, out *LastError, s conversion.Scope) error { + return autoConvert_core_LastError_To_v1alpha1_LastError(in, out, s) +} + +func autoConvert_v1alpha1_LastOperation_To_core_LastOperation(in *LastOperation, out *core.LastOperation, s conversion.Scope) error { + out.Description = in.Description + out.LastUpdateTime = in.LastUpdateTime + out.Progress = in.Progress + out.State = core.LastOperationState(in.State) + out.Type = core.LastOperationType(in.Type) + return nil +} + +// Convert_v1alpha1_LastOperation_To_core_LastOperation is an autogenerated conversion function. +func Convert_v1alpha1_LastOperation_To_core_LastOperation(in *LastOperation, out *core.LastOperation, s conversion.Scope) error { + return autoConvert_v1alpha1_LastOperation_To_core_LastOperation(in, out, s) +} + +func autoConvert_core_LastOperation_To_v1alpha1_LastOperation(in *core.LastOperation, out *LastOperation, s conversion.Scope) error { + out.Description = in.Description + out.LastUpdateTime = in.LastUpdateTime + out.Progress = in.Progress + out.State = LastOperationState(in.State) + out.Type = LastOperationType(in.Type) + return nil +} + +// Convert_core_LastOperation_To_v1alpha1_LastOperation is an autogenerated conversion function. +func Convert_core_LastOperation_To_v1alpha1_LastOperation(in *core.LastOperation, out *LastOperation, s conversion.Scope) error { + return autoConvert_core_LastOperation_To_v1alpha1_LastOperation(in, out, s) +} + +func autoConvert_v1alpha1_Machine_To_core_Machine(in *Machine, out *core.Machine, s conversion.Scope) error { + out.Type = in.Type + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(core.ShootMachineImage) + if err := Convert_v1alpha1_ShootMachineImage_To_core_ShootMachineImage(*in, *out, s); err != nil { + return err + } + } else { + out.Image = nil + } + return nil +} + +// Convert_v1alpha1_Machine_To_core_Machine is an autogenerated conversion function. +func Convert_v1alpha1_Machine_To_core_Machine(in *Machine, out *core.Machine, s conversion.Scope) error { + return autoConvert_v1alpha1_Machine_To_core_Machine(in, out, s) +} + +func autoConvert_core_Machine_To_v1alpha1_Machine(in *core.Machine, out *Machine, s conversion.Scope) error { + out.Type = in.Type + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(ShootMachineImage) + if err := Convert_core_ShootMachineImage_To_v1alpha1_ShootMachineImage(*in, *out, s); err != nil { + return err + } + } else { + out.Image = nil + } + return nil +} + +// Convert_core_Machine_To_v1alpha1_Machine is an autogenerated conversion function. 
+func Convert_core_Machine_To_v1alpha1_Machine(in *core.Machine, out *Machine, s conversion.Scope) error { + return autoConvert_core_Machine_To_v1alpha1_Machine(in, out, s) +} + +func autoConvert_v1alpha1_MachineControllerManagerSettings_To_core_MachineControllerManagerSettings(in *MachineControllerManagerSettings, out *core.MachineControllerManagerSettings, s conversion.Scope) error { + out.MachineDrainTimeout = (*metav1.Duration)(unsafe.Pointer(in.MachineDrainTimeout)) + out.MachineHealthTimeout = (*metav1.Duration)(unsafe.Pointer(in.MachineHealthTimeout)) + out.MachineCreationTimeout = (*metav1.Duration)(unsafe.Pointer(in.MachineCreationTimeout)) + out.MaxEvictRetries = (*int32)(unsafe.Pointer(in.MaxEvictRetries)) + out.NodeConditions = *(*[]string)(unsafe.Pointer(&in.NodeConditions)) + return nil +} + +// Convert_v1alpha1_MachineControllerManagerSettings_To_core_MachineControllerManagerSettings is an autogenerated conversion function. +func Convert_v1alpha1_MachineControllerManagerSettings_To_core_MachineControllerManagerSettings(in *MachineControllerManagerSettings, out *core.MachineControllerManagerSettings, s conversion.Scope) error { + return autoConvert_v1alpha1_MachineControllerManagerSettings_To_core_MachineControllerManagerSettings(in, out, s) +} + +func autoConvert_core_MachineControllerManagerSettings_To_v1alpha1_MachineControllerManagerSettings(in *core.MachineControllerManagerSettings, out *MachineControllerManagerSettings, s conversion.Scope) error { + out.MachineDrainTimeout = (*metav1.Duration)(unsafe.Pointer(in.MachineDrainTimeout)) + out.MachineHealthTimeout = (*metav1.Duration)(unsafe.Pointer(in.MachineHealthTimeout)) + out.MachineCreationTimeout = (*metav1.Duration)(unsafe.Pointer(in.MachineCreationTimeout)) + out.MaxEvictRetries = (*int32)(unsafe.Pointer(in.MaxEvictRetries)) + out.NodeConditions = *(*[]string)(unsafe.Pointer(&in.NodeConditions)) + return nil +} + +// Convert_core_MachineControllerManagerSettings_To_v1alpha1_MachineControllerManagerSettings is an autogenerated conversion function. +func Convert_core_MachineControllerManagerSettings_To_v1alpha1_MachineControllerManagerSettings(in *core.MachineControllerManagerSettings, out *MachineControllerManagerSettings, s conversion.Scope) error { + return autoConvert_core_MachineControllerManagerSettings_To_v1alpha1_MachineControllerManagerSettings(in, out, s) +} + +func autoConvert_v1alpha1_MachineImage_To_core_MachineImage(in *MachineImage, out *core.MachineImage, s conversion.Scope) error { + out.Name = in.Name + out.Versions = *(*[]core.MachineImageVersion)(unsafe.Pointer(&in.Versions)) + return nil +} + +// Convert_v1alpha1_MachineImage_To_core_MachineImage is an autogenerated conversion function. +func Convert_v1alpha1_MachineImage_To_core_MachineImage(in *MachineImage, out *core.MachineImage, s conversion.Scope) error { + return autoConvert_v1alpha1_MachineImage_To_core_MachineImage(in, out, s) +} + +func autoConvert_core_MachineImage_To_v1alpha1_MachineImage(in *core.MachineImage, out *MachineImage, s conversion.Scope) error { + out.Name = in.Name + out.Versions = *(*[]MachineImageVersion)(unsafe.Pointer(&in.Versions)) + return nil +} + +// Convert_core_MachineImage_To_v1alpha1_MachineImage is an autogenerated conversion function. 
+func Convert_core_MachineImage_To_v1alpha1_MachineImage(in *core.MachineImage, out *MachineImage, s conversion.Scope) error { + return autoConvert_core_MachineImage_To_v1alpha1_MachineImage(in, out, s) +} + +func autoConvert_v1alpha1_MachineImageVersion_To_core_MachineImageVersion(in *MachineImageVersion, out *core.MachineImageVersion, s conversion.Scope) error { + if err := Convert_v1alpha1_ExpirableVersion_To_core_ExpirableVersion(&in.ExpirableVersion, &out.ExpirableVersion, s); err != nil { + return err + } + out.CRI = *(*[]core.CRI)(unsafe.Pointer(&in.CRI)) + return nil +} + +// Convert_v1alpha1_MachineImageVersion_To_core_MachineImageVersion is an autogenerated conversion function. +func Convert_v1alpha1_MachineImageVersion_To_core_MachineImageVersion(in *MachineImageVersion, out *core.MachineImageVersion, s conversion.Scope) error { + return autoConvert_v1alpha1_MachineImageVersion_To_core_MachineImageVersion(in, out, s) +} + +func autoConvert_core_MachineImageVersion_To_v1alpha1_MachineImageVersion(in *core.MachineImageVersion, out *MachineImageVersion, s conversion.Scope) error { + if err := Convert_core_ExpirableVersion_To_v1alpha1_ExpirableVersion(&in.ExpirableVersion, &out.ExpirableVersion, s); err != nil { + return err + } + out.CRI = *(*[]CRI)(unsafe.Pointer(&in.CRI)) + return nil +} + +// Convert_core_MachineImageVersion_To_v1alpha1_MachineImageVersion is an autogenerated conversion function. +func Convert_core_MachineImageVersion_To_v1alpha1_MachineImageVersion(in *core.MachineImageVersion, out *MachineImageVersion, s conversion.Scope) error { + return autoConvert_core_MachineImageVersion_To_v1alpha1_MachineImageVersion(in, out, s) +} + +func autoConvert_v1alpha1_MachineType_To_core_MachineType(in *MachineType, out *core.MachineType, s conversion.Scope) error { + out.CPU = in.CPU + out.GPU = in.GPU + out.Memory = in.Memory + out.Name = in.Name + out.Storage = (*core.MachineTypeStorage)(unsafe.Pointer(in.Storage)) + out.Usable = (*bool)(unsafe.Pointer(in.Usable)) + return nil +} + +// Convert_v1alpha1_MachineType_To_core_MachineType is an autogenerated conversion function. +func Convert_v1alpha1_MachineType_To_core_MachineType(in *MachineType, out *core.MachineType, s conversion.Scope) error { + return autoConvert_v1alpha1_MachineType_To_core_MachineType(in, out, s) +} + +func autoConvert_core_MachineType_To_v1alpha1_MachineType(in *core.MachineType, out *MachineType, s conversion.Scope) error { + out.CPU = in.CPU + out.GPU = in.GPU + out.Memory = in.Memory + out.Name = in.Name + out.Storage = (*MachineTypeStorage)(unsafe.Pointer(in.Storage)) + out.Usable = (*bool)(unsafe.Pointer(in.Usable)) + return nil +} + +// Convert_core_MachineType_To_v1alpha1_MachineType is an autogenerated conversion function. +func Convert_core_MachineType_To_v1alpha1_MachineType(in *core.MachineType, out *MachineType, s conversion.Scope) error { + return autoConvert_core_MachineType_To_v1alpha1_MachineType(in, out, s) +} + +func autoConvert_v1alpha1_MachineTypeStorage_To_core_MachineTypeStorage(in *MachineTypeStorage, out *core.MachineTypeStorage, s conversion.Scope) error { + out.Class = in.Class + out.StorageSize = in.StorageSize + out.Type = in.Type + return nil +} + +// Convert_v1alpha1_MachineTypeStorage_To_core_MachineTypeStorage is an autogenerated conversion function. 
+func Convert_v1alpha1_MachineTypeStorage_To_core_MachineTypeStorage(in *MachineTypeStorage, out *core.MachineTypeStorage, s conversion.Scope) error { + return autoConvert_v1alpha1_MachineTypeStorage_To_core_MachineTypeStorage(in, out, s) +} + +func autoConvert_core_MachineTypeStorage_To_v1alpha1_MachineTypeStorage(in *core.MachineTypeStorage, out *MachineTypeStorage, s conversion.Scope) error { + out.Class = in.Class + out.StorageSize = in.StorageSize + out.Type = in.Type + return nil +} + +// Convert_core_MachineTypeStorage_To_v1alpha1_MachineTypeStorage is an autogenerated conversion function. +func Convert_core_MachineTypeStorage_To_v1alpha1_MachineTypeStorage(in *core.MachineTypeStorage, out *MachineTypeStorage, s conversion.Scope) error { + return autoConvert_core_MachineTypeStorage_To_v1alpha1_MachineTypeStorage(in, out, s) +} + +func autoConvert_v1alpha1_Maintenance_To_core_Maintenance(in *Maintenance, out *core.Maintenance, s conversion.Scope) error { + out.AutoUpdate = (*core.MaintenanceAutoUpdate)(unsafe.Pointer(in.AutoUpdate)) + out.TimeWindow = (*core.MaintenanceTimeWindow)(unsafe.Pointer(in.TimeWindow)) + out.ConfineSpecUpdateRollout = (*bool)(unsafe.Pointer(in.ConfineSpecUpdateRollout)) + return nil +} + +// Convert_v1alpha1_Maintenance_To_core_Maintenance is an autogenerated conversion function. +func Convert_v1alpha1_Maintenance_To_core_Maintenance(in *Maintenance, out *core.Maintenance, s conversion.Scope) error { + return autoConvert_v1alpha1_Maintenance_To_core_Maintenance(in, out, s) +} + +func autoConvert_core_Maintenance_To_v1alpha1_Maintenance(in *core.Maintenance, out *Maintenance, s conversion.Scope) error { + out.AutoUpdate = (*MaintenanceAutoUpdate)(unsafe.Pointer(in.AutoUpdate)) + out.TimeWindow = (*MaintenanceTimeWindow)(unsafe.Pointer(in.TimeWindow)) + out.ConfineSpecUpdateRollout = (*bool)(unsafe.Pointer(in.ConfineSpecUpdateRollout)) + return nil +} + +// Convert_core_Maintenance_To_v1alpha1_Maintenance is an autogenerated conversion function. +func Convert_core_Maintenance_To_v1alpha1_Maintenance(in *core.Maintenance, out *Maintenance, s conversion.Scope) error { + return autoConvert_core_Maintenance_To_v1alpha1_Maintenance(in, out, s) +} + +func autoConvert_v1alpha1_MaintenanceAutoUpdate_To_core_MaintenanceAutoUpdate(in *MaintenanceAutoUpdate, out *core.MaintenanceAutoUpdate, s conversion.Scope) error { + out.KubernetesVersion = in.KubernetesVersion + out.MachineImageVersion = in.MachineImageVersion + return nil +} + +// Convert_v1alpha1_MaintenanceAutoUpdate_To_core_MaintenanceAutoUpdate is an autogenerated conversion function. +func Convert_v1alpha1_MaintenanceAutoUpdate_To_core_MaintenanceAutoUpdate(in *MaintenanceAutoUpdate, out *core.MaintenanceAutoUpdate, s conversion.Scope) error { + return autoConvert_v1alpha1_MaintenanceAutoUpdate_To_core_MaintenanceAutoUpdate(in, out, s) +} + +func autoConvert_core_MaintenanceAutoUpdate_To_v1alpha1_MaintenanceAutoUpdate(in *core.MaintenanceAutoUpdate, out *MaintenanceAutoUpdate, s conversion.Scope) error { + out.KubernetesVersion = in.KubernetesVersion + out.MachineImageVersion = in.MachineImageVersion + return nil +} + +// Convert_core_MaintenanceAutoUpdate_To_v1alpha1_MaintenanceAutoUpdate is an autogenerated conversion function. 
+func Convert_core_MaintenanceAutoUpdate_To_v1alpha1_MaintenanceAutoUpdate(in *core.MaintenanceAutoUpdate, out *MaintenanceAutoUpdate, s conversion.Scope) error { + return autoConvert_core_MaintenanceAutoUpdate_To_v1alpha1_MaintenanceAutoUpdate(in, out, s) +} + +func autoConvert_v1alpha1_MaintenanceTimeWindow_To_core_MaintenanceTimeWindow(in *MaintenanceTimeWindow, out *core.MaintenanceTimeWindow, s conversion.Scope) error { + out.Begin = in.Begin + out.End = in.End + return nil +} + +// Convert_v1alpha1_MaintenanceTimeWindow_To_core_MaintenanceTimeWindow is an autogenerated conversion function. +func Convert_v1alpha1_MaintenanceTimeWindow_To_core_MaintenanceTimeWindow(in *MaintenanceTimeWindow, out *core.MaintenanceTimeWindow, s conversion.Scope) error { + return autoConvert_v1alpha1_MaintenanceTimeWindow_To_core_MaintenanceTimeWindow(in, out, s) +} + +func autoConvert_core_MaintenanceTimeWindow_To_v1alpha1_MaintenanceTimeWindow(in *core.MaintenanceTimeWindow, out *MaintenanceTimeWindow, s conversion.Scope) error { + out.Begin = in.Begin + out.End = in.End + return nil +} + +// Convert_core_MaintenanceTimeWindow_To_v1alpha1_MaintenanceTimeWindow is an autogenerated conversion function. +func Convert_core_MaintenanceTimeWindow_To_v1alpha1_MaintenanceTimeWindow(in *core.MaintenanceTimeWindow, out *MaintenanceTimeWindow, s conversion.Scope) error { + return autoConvert_core_MaintenanceTimeWindow_To_v1alpha1_MaintenanceTimeWindow(in, out, s) +} + +func autoConvert_v1alpha1_Monitoring_To_core_Monitoring(in *Monitoring, out *core.Monitoring, s conversion.Scope) error { + out.Alerting = (*core.Alerting)(unsafe.Pointer(in.Alerting)) + return nil +} + +// Convert_v1alpha1_Monitoring_To_core_Monitoring is an autogenerated conversion function. +func Convert_v1alpha1_Monitoring_To_core_Monitoring(in *Monitoring, out *core.Monitoring, s conversion.Scope) error { + return autoConvert_v1alpha1_Monitoring_To_core_Monitoring(in, out, s) +} + +func autoConvert_core_Monitoring_To_v1alpha1_Monitoring(in *core.Monitoring, out *Monitoring, s conversion.Scope) error { + out.Alerting = (*Alerting)(unsafe.Pointer(in.Alerting)) + return nil +} + +// Convert_core_Monitoring_To_v1alpha1_Monitoring is an autogenerated conversion function. +func Convert_core_Monitoring_To_v1alpha1_Monitoring(in *core.Monitoring, out *Monitoring, s conversion.Scope) error { + return autoConvert_core_Monitoring_To_v1alpha1_Monitoring(in, out, s) +} + +func autoConvert_v1alpha1_NamedResourceReference_To_core_NamedResourceReference(in *NamedResourceReference, out *core.NamedResourceReference, s conversion.Scope) error { + out.Name = in.Name + out.ResourceRef = in.ResourceRef + return nil +} + +// Convert_v1alpha1_NamedResourceReference_To_core_NamedResourceReference is an autogenerated conversion function. +func Convert_v1alpha1_NamedResourceReference_To_core_NamedResourceReference(in *NamedResourceReference, out *core.NamedResourceReference, s conversion.Scope) error { + return autoConvert_v1alpha1_NamedResourceReference_To_core_NamedResourceReference(in, out, s) +} + +func autoConvert_core_NamedResourceReference_To_v1alpha1_NamedResourceReference(in *core.NamedResourceReference, out *NamedResourceReference, s conversion.Scope) error { + out.Name = in.Name + out.ResourceRef = in.ResourceRef + return nil +} + +// Convert_core_NamedResourceReference_To_v1alpha1_NamedResourceReference is an autogenerated conversion function. 
+func Convert_core_NamedResourceReference_To_v1alpha1_NamedResourceReference(in *core.NamedResourceReference, out *NamedResourceReference, s conversion.Scope) error { + return autoConvert_core_NamedResourceReference_To_v1alpha1_NamedResourceReference(in, out, s) +} + +func autoConvert_v1alpha1_Networking_To_core_Networking(in *Networking, out *core.Networking, s conversion.Scope) error { + out.Type = in.Type + out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig)) + out.Pods = (*string)(unsafe.Pointer(in.Pods)) + out.Nodes = (*string)(unsafe.Pointer(in.Nodes)) + out.Services = (*string)(unsafe.Pointer(in.Services)) + return nil +} + +// Convert_v1alpha1_Networking_To_core_Networking is an autogenerated conversion function. +func Convert_v1alpha1_Networking_To_core_Networking(in *Networking, out *core.Networking, s conversion.Scope) error { + return autoConvert_v1alpha1_Networking_To_core_Networking(in, out, s) +} + +func autoConvert_core_Networking_To_v1alpha1_Networking(in *core.Networking, out *Networking, s conversion.Scope) error { + out.Type = in.Type + out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig)) + out.Pods = (*string)(unsafe.Pointer(in.Pods)) + out.Nodes = (*string)(unsafe.Pointer(in.Nodes)) + out.Services = (*string)(unsafe.Pointer(in.Services)) + return nil +} + +// Convert_core_Networking_To_v1alpha1_Networking is an autogenerated conversion function. +func Convert_core_Networking_To_v1alpha1_Networking(in *core.Networking, out *Networking, s conversion.Scope) error { + return autoConvert_core_Networking_To_v1alpha1_Networking(in, out, s) +} + +func autoConvert_v1alpha1_NginxIngress_To_core_NginxIngress(in *NginxIngress, out *core.NginxIngress, s conversion.Scope) error { + if err := Convert_v1alpha1_Addon_To_core_Addon(&in.Addon, &out.Addon, s); err != nil { + return err + } + out.LoadBalancerSourceRanges = *(*[]string)(unsafe.Pointer(&in.LoadBalancerSourceRanges)) + out.Config = *(*map[string]string)(unsafe.Pointer(&in.Config)) + out.ExternalTrafficPolicy = (*v1.ServiceExternalTrafficPolicyType)(unsafe.Pointer(in.ExternalTrafficPolicy)) + return nil +} + +// Convert_v1alpha1_NginxIngress_To_core_NginxIngress is an autogenerated conversion function. +func Convert_v1alpha1_NginxIngress_To_core_NginxIngress(in *NginxIngress, out *core.NginxIngress, s conversion.Scope) error { + return autoConvert_v1alpha1_NginxIngress_To_core_NginxIngress(in, out, s) +} + +func autoConvert_core_NginxIngress_To_v1alpha1_NginxIngress(in *core.NginxIngress, out *NginxIngress, s conversion.Scope) error { + if err := Convert_core_Addon_To_v1alpha1_Addon(&in.Addon, &out.Addon, s); err != nil { + return err + } + out.LoadBalancerSourceRanges = *(*[]string)(unsafe.Pointer(&in.LoadBalancerSourceRanges)) + out.Config = *(*map[string]string)(unsafe.Pointer(&in.Config)) + out.ExternalTrafficPolicy = (*v1.ServiceExternalTrafficPolicyType)(unsafe.Pointer(in.ExternalTrafficPolicy)) + return nil +} + +// Convert_core_NginxIngress_To_v1alpha1_NginxIngress is an autogenerated conversion function. 
+func Convert_core_NginxIngress_To_v1alpha1_NginxIngress(in *core.NginxIngress, out *NginxIngress, s conversion.Scope) error { + return autoConvert_core_NginxIngress_To_v1alpha1_NginxIngress(in, out, s) +} + +func autoConvert_v1alpha1_OIDCConfig_To_core_OIDCConfig(in *OIDCConfig, out *core.OIDCConfig, s conversion.Scope) error { + out.CABundle = (*string)(unsafe.Pointer(in.CABundle)) + out.ClientAuthentication = (*core.OpenIDConnectClientAuthentication)(unsafe.Pointer(in.ClientAuthentication)) + out.ClientID = (*string)(unsafe.Pointer(in.ClientID)) + out.GroupsClaim = (*string)(unsafe.Pointer(in.GroupsClaim)) + out.GroupsPrefix = (*string)(unsafe.Pointer(in.GroupsPrefix)) + out.IssuerURL = (*string)(unsafe.Pointer(in.IssuerURL)) + out.RequiredClaims = *(*map[string]string)(unsafe.Pointer(&in.RequiredClaims)) + out.SigningAlgs = *(*[]string)(unsafe.Pointer(&in.SigningAlgs)) + out.UsernameClaim = (*string)(unsafe.Pointer(in.UsernameClaim)) + out.UsernamePrefix = (*string)(unsafe.Pointer(in.UsernamePrefix)) + return nil +} + +// Convert_v1alpha1_OIDCConfig_To_core_OIDCConfig is an autogenerated conversion function. +func Convert_v1alpha1_OIDCConfig_To_core_OIDCConfig(in *OIDCConfig, out *core.OIDCConfig, s conversion.Scope) error { + return autoConvert_v1alpha1_OIDCConfig_To_core_OIDCConfig(in, out, s) +} + +func autoConvert_core_OIDCConfig_To_v1alpha1_OIDCConfig(in *core.OIDCConfig, out *OIDCConfig, s conversion.Scope) error { + out.CABundle = (*string)(unsafe.Pointer(in.CABundle)) + out.ClientAuthentication = (*OpenIDConnectClientAuthentication)(unsafe.Pointer(in.ClientAuthentication)) + out.ClientID = (*string)(unsafe.Pointer(in.ClientID)) + out.GroupsClaim = (*string)(unsafe.Pointer(in.GroupsClaim)) + out.GroupsPrefix = (*string)(unsafe.Pointer(in.GroupsPrefix)) + out.IssuerURL = (*string)(unsafe.Pointer(in.IssuerURL)) + out.RequiredClaims = *(*map[string]string)(unsafe.Pointer(&in.RequiredClaims)) + out.SigningAlgs = *(*[]string)(unsafe.Pointer(&in.SigningAlgs)) + out.UsernameClaim = (*string)(unsafe.Pointer(in.UsernameClaim)) + out.UsernamePrefix = (*string)(unsafe.Pointer(in.UsernamePrefix)) + return nil +} + +// Convert_core_OIDCConfig_To_v1alpha1_OIDCConfig is an autogenerated conversion function. +func Convert_core_OIDCConfig_To_v1alpha1_OIDCConfig(in *core.OIDCConfig, out *OIDCConfig, s conversion.Scope) error { + return autoConvert_core_OIDCConfig_To_v1alpha1_OIDCConfig(in, out, s) +} + +func autoConvert_v1alpha1_OpenIDConnectClientAuthentication_To_core_OpenIDConnectClientAuthentication(in *OpenIDConnectClientAuthentication, out *core.OpenIDConnectClientAuthentication, s conversion.Scope) error { + out.ExtraConfig = *(*map[string]string)(unsafe.Pointer(&in.ExtraConfig)) + out.Secret = (*string)(unsafe.Pointer(in.Secret)) + return nil +} + +// Convert_v1alpha1_OpenIDConnectClientAuthentication_To_core_OpenIDConnectClientAuthentication is an autogenerated conversion function. 
+func Convert_v1alpha1_OpenIDConnectClientAuthentication_To_core_OpenIDConnectClientAuthentication(in *OpenIDConnectClientAuthentication, out *core.OpenIDConnectClientAuthentication, s conversion.Scope) error { + return autoConvert_v1alpha1_OpenIDConnectClientAuthentication_To_core_OpenIDConnectClientAuthentication(in, out, s) +} + +func autoConvert_core_OpenIDConnectClientAuthentication_To_v1alpha1_OpenIDConnectClientAuthentication(in *core.OpenIDConnectClientAuthentication, out *OpenIDConnectClientAuthentication, s conversion.Scope) error { + out.ExtraConfig = *(*map[string]string)(unsafe.Pointer(&in.ExtraConfig)) + out.Secret = (*string)(unsafe.Pointer(in.Secret)) + return nil +} + +// Convert_core_OpenIDConnectClientAuthentication_To_v1alpha1_OpenIDConnectClientAuthentication is an autogenerated conversion function. +func Convert_core_OpenIDConnectClientAuthentication_To_v1alpha1_OpenIDConnectClientAuthentication(in *core.OpenIDConnectClientAuthentication, out *OpenIDConnectClientAuthentication, s conversion.Scope) error { + return autoConvert_core_OpenIDConnectClientAuthentication_To_v1alpha1_OpenIDConnectClientAuthentication(in, out, s) +} + +func autoConvert_v1alpha1_Plant_To_core_Plant(in *Plant, out *core.Plant, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha1_PlantSpec_To_core_PlantSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1alpha1_PlantStatus_To_core_PlantStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha1_Plant_To_core_Plant is an autogenerated conversion function. +func Convert_v1alpha1_Plant_To_core_Plant(in *Plant, out *core.Plant, s conversion.Scope) error { + return autoConvert_v1alpha1_Plant_To_core_Plant(in, out, s) +} + +func autoConvert_core_Plant_To_v1alpha1_Plant(in *core.Plant, out *Plant, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_PlantSpec_To_v1alpha1_PlantSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_core_PlantStatus_To_v1alpha1_PlantStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_core_Plant_To_v1alpha1_Plant is an autogenerated conversion function. +func Convert_core_Plant_To_v1alpha1_Plant(in *core.Plant, out *Plant, s conversion.Scope) error { + return autoConvert_core_Plant_To_v1alpha1_Plant(in, out, s) +} + +func autoConvert_v1alpha1_PlantList_To_core_PlantList(in *PlantList, out *core.PlantList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]core.Plant)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha1_PlantList_To_core_PlantList is an autogenerated conversion function. +func Convert_v1alpha1_PlantList_To_core_PlantList(in *PlantList, out *core.PlantList, s conversion.Scope) error { + return autoConvert_v1alpha1_PlantList_To_core_PlantList(in, out, s) +} + +func autoConvert_core_PlantList_To_v1alpha1_PlantList(in *core.PlantList, out *PlantList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]Plant)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_core_PlantList_To_v1alpha1_PlantList is an autogenerated conversion function. 
+func Convert_core_PlantList_To_v1alpha1_PlantList(in *core.PlantList, out *PlantList, s conversion.Scope) error { + return autoConvert_core_PlantList_To_v1alpha1_PlantList(in, out, s) +} + +func autoConvert_v1alpha1_PlantSpec_To_core_PlantSpec(in *PlantSpec, out *core.PlantSpec, s conversion.Scope) error { + out.SecretRef = in.SecretRef + out.Endpoints = *(*[]core.Endpoint)(unsafe.Pointer(&in.Endpoints)) + return nil +} + +// Convert_v1alpha1_PlantSpec_To_core_PlantSpec is an autogenerated conversion function. +func Convert_v1alpha1_PlantSpec_To_core_PlantSpec(in *PlantSpec, out *core.PlantSpec, s conversion.Scope) error { + return autoConvert_v1alpha1_PlantSpec_To_core_PlantSpec(in, out, s) +} + +func autoConvert_core_PlantSpec_To_v1alpha1_PlantSpec(in *core.PlantSpec, out *PlantSpec, s conversion.Scope) error { + out.SecretRef = in.SecretRef + out.Endpoints = *(*[]Endpoint)(unsafe.Pointer(&in.Endpoints)) + return nil +} + +// Convert_core_PlantSpec_To_v1alpha1_PlantSpec is an autogenerated conversion function. +func Convert_core_PlantSpec_To_v1alpha1_PlantSpec(in *core.PlantSpec, out *PlantSpec, s conversion.Scope) error { + return autoConvert_core_PlantSpec_To_v1alpha1_PlantSpec(in, out, s) +} + +func autoConvert_v1alpha1_PlantStatus_To_core_PlantStatus(in *PlantStatus, out *core.PlantStatus, s conversion.Scope) error { + out.Conditions = *(*[]core.Condition)(unsafe.Pointer(&in.Conditions)) + out.ObservedGeneration = (*int64)(unsafe.Pointer(in.ObservedGeneration)) + out.ClusterInfo = (*core.ClusterInfo)(unsafe.Pointer(in.ClusterInfo)) + return nil +} + +// Convert_v1alpha1_PlantStatus_To_core_PlantStatus is an autogenerated conversion function. +func Convert_v1alpha1_PlantStatus_To_core_PlantStatus(in *PlantStatus, out *core.PlantStatus, s conversion.Scope) error { + return autoConvert_v1alpha1_PlantStatus_To_core_PlantStatus(in, out, s) +} + +func autoConvert_core_PlantStatus_To_v1alpha1_PlantStatus(in *core.PlantStatus, out *PlantStatus, s conversion.Scope) error { + out.Conditions = *(*[]Condition)(unsafe.Pointer(&in.Conditions)) + out.ObservedGeneration = (*int64)(unsafe.Pointer(in.ObservedGeneration)) + out.ClusterInfo = (*ClusterInfo)(unsafe.Pointer(in.ClusterInfo)) + return nil +} + +// Convert_core_PlantStatus_To_v1alpha1_PlantStatus is an autogenerated conversion function. +func Convert_core_PlantStatus_To_v1alpha1_PlantStatus(in *core.PlantStatus, out *PlantStatus, s conversion.Scope) error { + return autoConvert_core_PlantStatus_To_v1alpha1_PlantStatus(in, out, s) +} + +func autoConvert_v1alpha1_Project_To_core_Project(in *Project, out *core.Project, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha1_ProjectSpec_To_core_ProjectSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1alpha1_ProjectStatus_To_core_ProjectStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha1_Project_To_core_Project is an autogenerated conversion function. 
+func Convert_v1alpha1_Project_To_core_Project(in *Project, out *core.Project, s conversion.Scope) error { + return autoConvert_v1alpha1_Project_To_core_Project(in, out, s) +} + +func autoConvert_core_Project_To_v1alpha1_Project(in *core.Project, out *Project, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_ProjectSpec_To_v1alpha1_ProjectSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_core_ProjectStatus_To_v1alpha1_ProjectStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_core_Project_To_v1alpha1_Project is an autogenerated conversion function. +func Convert_core_Project_To_v1alpha1_Project(in *core.Project, out *Project, s conversion.Scope) error { + return autoConvert_core_Project_To_v1alpha1_Project(in, out, s) +} + +func autoConvert_v1alpha1_ProjectList_To_core_ProjectList(in *ProjectList, out *core.ProjectList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]core.Project, len(*in)) + for i := range *in { + if err := Convert_v1alpha1_Project_To_core_Project(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_v1alpha1_ProjectList_To_core_ProjectList is an autogenerated conversion function. +func Convert_v1alpha1_ProjectList_To_core_ProjectList(in *ProjectList, out *core.ProjectList, s conversion.Scope) error { + return autoConvert_v1alpha1_ProjectList_To_core_ProjectList(in, out, s) +} + +func autoConvert_core_ProjectList_To_v1alpha1_ProjectList(in *core.ProjectList, out *ProjectList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Project, len(*in)) + for i := range *in { + if err := Convert_core_Project_To_v1alpha1_Project(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_core_ProjectList_To_v1alpha1_ProjectList is an autogenerated conversion function. 
+func Convert_core_ProjectList_To_v1alpha1_ProjectList(in *core.ProjectList, out *ProjectList, s conversion.Scope) error { + return autoConvert_core_ProjectList_To_v1alpha1_ProjectList(in, out, s) +} + +func autoConvert_v1alpha1_ProjectMember_To_core_ProjectMember(in *ProjectMember, out *core.ProjectMember, s conversion.Scope) error { + out.Subject = in.Subject + // WARNING: in.Role requires manual conversion: does not exist in peer-type + out.Roles = *(*[]string)(unsafe.Pointer(&in.Roles)) + return nil +} + +func autoConvert_core_ProjectMember_To_v1alpha1_ProjectMember(in *core.ProjectMember, out *ProjectMember, s conversion.Scope) error { + out.Subject = in.Subject + out.Roles = *(*[]string)(unsafe.Pointer(&in.Roles)) + return nil +} + +func autoConvert_v1alpha1_ProjectSpec_To_core_ProjectSpec(in *ProjectSpec, out *core.ProjectSpec, s conversion.Scope) error { + out.CreatedBy = (*rbacv1.Subject)(unsafe.Pointer(in.CreatedBy)) + out.Description = (*string)(unsafe.Pointer(in.Description)) + out.Owner = (*rbacv1.Subject)(unsafe.Pointer(in.Owner)) + out.Purpose = (*string)(unsafe.Pointer(in.Purpose)) + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]core.ProjectMember, len(*in)) + for i := range *in { + if err := Convert_v1alpha1_ProjectMember_To_core_ProjectMember(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Members = nil + } + out.Namespace = (*string)(unsafe.Pointer(in.Namespace)) + out.Tolerations = (*core.ProjectTolerations)(unsafe.Pointer(in.Tolerations)) + return nil +} + +func autoConvert_core_ProjectSpec_To_v1alpha1_ProjectSpec(in *core.ProjectSpec, out *ProjectSpec, s conversion.Scope) error { + out.CreatedBy = (*rbacv1.Subject)(unsafe.Pointer(in.CreatedBy)) + out.Description = (*string)(unsafe.Pointer(in.Description)) + out.Owner = (*rbacv1.Subject)(unsafe.Pointer(in.Owner)) + out.Purpose = (*string)(unsafe.Pointer(in.Purpose)) + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]ProjectMember, len(*in)) + for i := range *in { + if err := Convert_core_ProjectMember_To_v1alpha1_ProjectMember(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Members = nil + } + out.Namespace = (*string)(unsafe.Pointer(in.Namespace)) + out.Tolerations = (*ProjectTolerations)(unsafe.Pointer(in.Tolerations)) + return nil +} + +func autoConvert_v1alpha1_ProjectStatus_To_core_ProjectStatus(in *ProjectStatus, out *core.ProjectStatus, s conversion.Scope) error { + out.ObservedGeneration = in.ObservedGeneration + out.Phase = core.ProjectPhase(in.Phase) + out.StaleSinceTimestamp = (*metav1.Time)(unsafe.Pointer(in.StaleSinceTimestamp)) + out.StaleAutoDeleteTimestamp = (*metav1.Time)(unsafe.Pointer(in.StaleAutoDeleteTimestamp)) + return nil +} + +// Convert_v1alpha1_ProjectStatus_To_core_ProjectStatus is an autogenerated conversion function. 
+func Convert_v1alpha1_ProjectStatus_To_core_ProjectStatus(in *ProjectStatus, out *core.ProjectStatus, s conversion.Scope) error { + return autoConvert_v1alpha1_ProjectStatus_To_core_ProjectStatus(in, out, s) +} + +func autoConvert_core_ProjectStatus_To_v1alpha1_ProjectStatus(in *core.ProjectStatus, out *ProjectStatus, s conversion.Scope) error { + out.ObservedGeneration = in.ObservedGeneration + out.Phase = ProjectPhase(in.Phase) + out.StaleSinceTimestamp = (*metav1.Time)(unsafe.Pointer(in.StaleSinceTimestamp)) + out.StaleAutoDeleteTimestamp = (*metav1.Time)(unsafe.Pointer(in.StaleAutoDeleteTimestamp)) + return nil +} + +// Convert_core_ProjectStatus_To_v1alpha1_ProjectStatus is an autogenerated conversion function. +func Convert_core_ProjectStatus_To_v1alpha1_ProjectStatus(in *core.ProjectStatus, out *ProjectStatus, s conversion.Scope) error { + return autoConvert_core_ProjectStatus_To_v1alpha1_ProjectStatus(in, out, s) +} + +func autoConvert_v1alpha1_ProjectTolerations_To_core_ProjectTolerations(in *ProjectTolerations, out *core.ProjectTolerations, s conversion.Scope) error { + out.Defaults = *(*[]core.Toleration)(unsafe.Pointer(&in.Defaults)) + out.Whitelist = *(*[]core.Toleration)(unsafe.Pointer(&in.Whitelist)) + return nil +} + +// Convert_v1alpha1_ProjectTolerations_To_core_ProjectTolerations is an autogenerated conversion function. +func Convert_v1alpha1_ProjectTolerations_To_core_ProjectTolerations(in *ProjectTolerations, out *core.ProjectTolerations, s conversion.Scope) error { + return autoConvert_v1alpha1_ProjectTolerations_To_core_ProjectTolerations(in, out, s) +} + +func autoConvert_core_ProjectTolerations_To_v1alpha1_ProjectTolerations(in *core.ProjectTolerations, out *ProjectTolerations, s conversion.Scope) error { + out.Defaults = *(*[]Toleration)(unsafe.Pointer(&in.Defaults)) + out.Whitelist = *(*[]Toleration)(unsafe.Pointer(&in.Whitelist)) + return nil +} + +// Convert_core_ProjectTolerations_To_v1alpha1_ProjectTolerations is an autogenerated conversion function. +func Convert_core_ProjectTolerations_To_v1alpha1_ProjectTolerations(in *core.ProjectTolerations, out *ProjectTolerations, s conversion.Scope) error { + return autoConvert_core_ProjectTolerations_To_v1alpha1_ProjectTolerations(in, out, s) +} + +func autoConvert_v1alpha1_Provider_To_core_Provider(in *Provider, out *core.Provider, s conversion.Scope) error { + out.Type = in.Type + out.ControlPlaneConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ControlPlaneConfig)) + out.InfrastructureConfig = (*runtime.RawExtension)(unsafe.Pointer(in.InfrastructureConfig)) + if in.Workers != nil { + in, out := &in.Workers, &out.Workers + *out = make([]core.Worker, len(*in)) + for i := range *in { + if err := Convert_v1alpha1_Worker_To_core_Worker(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Workers = nil + } + return nil +} + +// Convert_v1alpha1_Provider_To_core_Provider is an autogenerated conversion function. 
+func Convert_v1alpha1_Provider_To_core_Provider(in *Provider, out *core.Provider, s conversion.Scope) error { + return autoConvert_v1alpha1_Provider_To_core_Provider(in, out, s) +} + +func autoConvert_core_Provider_To_v1alpha1_Provider(in *core.Provider, out *Provider, s conversion.Scope) error { + out.Type = in.Type + out.ControlPlaneConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ControlPlaneConfig)) + out.InfrastructureConfig = (*runtime.RawExtension)(unsafe.Pointer(in.InfrastructureConfig)) + if in.Workers != nil { + in, out := &in.Workers, &out.Workers + *out = make([]Worker, len(*in)) + for i := range *in { + if err := Convert_core_Worker_To_v1alpha1_Worker(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Workers = nil + } + return nil +} + +// Convert_core_Provider_To_v1alpha1_Provider is an autogenerated conversion function. +func Convert_core_Provider_To_v1alpha1_Provider(in *core.Provider, out *Provider, s conversion.Scope) error { + return autoConvert_core_Provider_To_v1alpha1_Provider(in, out, s) +} + +func autoConvert_v1alpha1_Quota_To_core_Quota(in *Quota, out *core.Quota, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha1_QuotaSpec_To_core_QuotaSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha1_Quota_To_core_Quota is an autogenerated conversion function. +func Convert_v1alpha1_Quota_To_core_Quota(in *Quota, out *core.Quota, s conversion.Scope) error { + return autoConvert_v1alpha1_Quota_To_core_Quota(in, out, s) +} + +func autoConvert_core_Quota_To_v1alpha1_Quota(in *core.Quota, out *Quota, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_QuotaSpec_To_v1alpha1_QuotaSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +// Convert_core_Quota_To_v1alpha1_Quota is an autogenerated conversion function. +func Convert_core_Quota_To_v1alpha1_Quota(in *core.Quota, out *Quota, s conversion.Scope) error { + return autoConvert_core_Quota_To_v1alpha1_Quota(in, out, s) +} + +func autoConvert_v1alpha1_QuotaList_To_core_QuotaList(in *QuotaList, out *core.QuotaList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]core.Quota)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha1_QuotaList_To_core_QuotaList is an autogenerated conversion function. +func Convert_v1alpha1_QuotaList_To_core_QuotaList(in *QuotaList, out *core.QuotaList, s conversion.Scope) error { + return autoConvert_v1alpha1_QuotaList_To_core_QuotaList(in, out, s) +} + +func autoConvert_core_QuotaList_To_v1alpha1_QuotaList(in *core.QuotaList, out *QuotaList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]Quota)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_core_QuotaList_To_v1alpha1_QuotaList is an autogenerated conversion function. +func Convert_core_QuotaList_To_v1alpha1_QuotaList(in *core.QuotaList, out *QuotaList, s conversion.Scope) error { + return autoConvert_core_QuotaList_To_v1alpha1_QuotaList(in, out, s) +} + +func autoConvert_v1alpha1_QuotaSpec_To_core_QuotaSpec(in *QuotaSpec, out *core.QuotaSpec, s conversion.Scope) error { + out.ClusterLifetimeDays = (*int32)(unsafe.Pointer(in.ClusterLifetimeDays)) + out.Metrics = *(*v1.ResourceList)(unsafe.Pointer(&in.Metrics)) + out.Scope = in.Scope + return nil +} + +// Convert_v1alpha1_QuotaSpec_To_core_QuotaSpec is an autogenerated conversion function. 
+func Convert_v1alpha1_QuotaSpec_To_core_QuotaSpec(in *QuotaSpec, out *core.QuotaSpec, s conversion.Scope) error { + return autoConvert_v1alpha1_QuotaSpec_To_core_QuotaSpec(in, out, s) +} + +func autoConvert_core_QuotaSpec_To_v1alpha1_QuotaSpec(in *core.QuotaSpec, out *QuotaSpec, s conversion.Scope) error { + out.ClusterLifetimeDays = (*int32)(unsafe.Pointer(in.ClusterLifetimeDays)) + out.Metrics = *(*v1.ResourceList)(unsafe.Pointer(&in.Metrics)) + out.Scope = in.Scope + return nil +} + +// Convert_core_QuotaSpec_To_v1alpha1_QuotaSpec is an autogenerated conversion function. +func Convert_core_QuotaSpec_To_v1alpha1_QuotaSpec(in *core.QuotaSpec, out *QuotaSpec, s conversion.Scope) error { + return autoConvert_core_QuotaSpec_To_v1alpha1_QuotaSpec(in, out, s) +} + +func autoConvert_v1alpha1_Region_To_core_Region(in *Region, out *core.Region, s conversion.Scope) error { + out.Name = in.Name + out.Zones = *(*[]core.AvailabilityZone)(unsafe.Pointer(&in.Zones)) + out.Labels = *(*map[string]string)(unsafe.Pointer(&in.Labels)) + return nil +} + +// Convert_v1alpha1_Region_To_core_Region is an autogenerated conversion function. +func Convert_v1alpha1_Region_To_core_Region(in *Region, out *core.Region, s conversion.Scope) error { + return autoConvert_v1alpha1_Region_To_core_Region(in, out, s) +} + +func autoConvert_core_Region_To_v1alpha1_Region(in *core.Region, out *Region, s conversion.Scope) error { + out.Name = in.Name + out.Zones = *(*[]AvailabilityZone)(unsafe.Pointer(&in.Zones)) + out.Labels = *(*map[string]string)(unsafe.Pointer(&in.Labels)) + return nil +} + +// Convert_core_Region_To_v1alpha1_Region is an autogenerated conversion function. +func Convert_core_Region_To_v1alpha1_Region(in *core.Region, out *Region, s conversion.Scope) error { + return autoConvert_core_Region_To_v1alpha1_Region(in, out, s) +} + +func autoConvert_v1alpha1_ResourceData_To_core_ResourceData(in *ResourceData, out *core.ResourceData, s conversion.Scope) error { + out.CrossVersionObjectReference = in.CrossVersionObjectReference + out.Data = in.Data + return nil +} + +// Convert_v1alpha1_ResourceData_To_core_ResourceData is an autogenerated conversion function. +func Convert_v1alpha1_ResourceData_To_core_ResourceData(in *ResourceData, out *core.ResourceData, s conversion.Scope) error { + return autoConvert_v1alpha1_ResourceData_To_core_ResourceData(in, out, s) +} + +func autoConvert_core_ResourceData_To_v1alpha1_ResourceData(in *core.ResourceData, out *ResourceData, s conversion.Scope) error { + out.CrossVersionObjectReference = in.CrossVersionObjectReference + out.Data = in.Data + return nil +} + +// Convert_core_ResourceData_To_v1alpha1_ResourceData is an autogenerated conversion function. +func Convert_core_ResourceData_To_v1alpha1_ResourceData(in *core.ResourceData, out *ResourceData, s conversion.Scope) error { + return autoConvert_core_ResourceData_To_v1alpha1_ResourceData(in, out, s) +} + +func autoConvert_v1alpha1_ResourceWatchCacheSize_To_core_ResourceWatchCacheSize(in *ResourceWatchCacheSize, out *core.ResourceWatchCacheSize, s conversion.Scope) error { + out.APIGroup = (*string)(unsafe.Pointer(in.APIGroup)) + out.Resource = in.Resource + out.CacheSize = in.CacheSize + return nil +} + +// Convert_v1alpha1_ResourceWatchCacheSize_To_core_ResourceWatchCacheSize is an autogenerated conversion function. 
+func Convert_v1alpha1_ResourceWatchCacheSize_To_core_ResourceWatchCacheSize(in *ResourceWatchCacheSize, out *core.ResourceWatchCacheSize, s conversion.Scope) error { + return autoConvert_v1alpha1_ResourceWatchCacheSize_To_core_ResourceWatchCacheSize(in, out, s) +} + +func autoConvert_core_ResourceWatchCacheSize_To_v1alpha1_ResourceWatchCacheSize(in *core.ResourceWatchCacheSize, out *ResourceWatchCacheSize, s conversion.Scope) error { + out.APIGroup = (*string)(unsafe.Pointer(in.APIGroup)) + out.Resource = in.Resource + out.CacheSize = in.CacheSize + return nil +} + +// Convert_core_ResourceWatchCacheSize_To_v1alpha1_ResourceWatchCacheSize is an autogenerated conversion function. +func Convert_core_ResourceWatchCacheSize_To_v1alpha1_ResourceWatchCacheSize(in *core.ResourceWatchCacheSize, out *ResourceWatchCacheSize, s conversion.Scope) error { + return autoConvert_core_ResourceWatchCacheSize_To_v1alpha1_ResourceWatchCacheSize(in, out, s) +} + +func autoConvert_v1alpha1_SecretBinding_To_core_SecretBinding(in *SecretBinding, out *core.SecretBinding, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.SecretRef = in.SecretRef + out.Quotas = *(*[]v1.ObjectReference)(unsafe.Pointer(&in.Quotas)) + return nil +} + +// Convert_v1alpha1_SecretBinding_To_core_SecretBinding is an autogenerated conversion function. +func Convert_v1alpha1_SecretBinding_To_core_SecretBinding(in *SecretBinding, out *core.SecretBinding, s conversion.Scope) error { + return autoConvert_v1alpha1_SecretBinding_To_core_SecretBinding(in, out, s) +} + +func autoConvert_core_SecretBinding_To_v1alpha1_SecretBinding(in *core.SecretBinding, out *SecretBinding, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.SecretRef = in.SecretRef + out.Quotas = *(*[]v1.ObjectReference)(unsafe.Pointer(&in.Quotas)) + return nil +} + +// Convert_core_SecretBinding_To_v1alpha1_SecretBinding is an autogenerated conversion function. +func Convert_core_SecretBinding_To_v1alpha1_SecretBinding(in *core.SecretBinding, out *SecretBinding, s conversion.Scope) error { + return autoConvert_core_SecretBinding_To_v1alpha1_SecretBinding(in, out, s) +} + +func autoConvert_v1alpha1_SecretBindingList_To_core_SecretBindingList(in *SecretBindingList, out *core.SecretBindingList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]core.SecretBinding)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha1_SecretBindingList_To_core_SecretBindingList is an autogenerated conversion function. +func Convert_v1alpha1_SecretBindingList_To_core_SecretBindingList(in *SecretBindingList, out *core.SecretBindingList, s conversion.Scope) error { + return autoConvert_v1alpha1_SecretBindingList_To_core_SecretBindingList(in, out, s) +} + +func autoConvert_core_SecretBindingList_To_v1alpha1_SecretBindingList(in *core.SecretBindingList, out *SecretBindingList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]SecretBinding)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_core_SecretBindingList_To_v1alpha1_SecretBindingList is an autogenerated conversion function. 
+func Convert_core_SecretBindingList_To_v1alpha1_SecretBindingList(in *core.SecretBindingList, out *SecretBindingList, s conversion.Scope) error { + return autoConvert_core_SecretBindingList_To_v1alpha1_SecretBindingList(in, out, s) +} + +func autoConvert_v1alpha1_Seed_To_core_Seed(in *Seed, out *core.Seed, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha1_SeedSpec_To_core_SeedSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1alpha1_SeedStatus_To_core_SeedStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func autoConvert_core_Seed_To_v1alpha1_Seed(in *core.Seed, out *Seed, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_SeedSpec_To_v1alpha1_SeedSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_core_SeedStatus_To_v1alpha1_SeedStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func autoConvert_v1alpha1_SeedBackup_To_core_SeedBackup(in *SeedBackup, out *core.SeedBackup, s conversion.Scope) error { + out.Provider = in.Provider + out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig)) + out.Region = (*string)(unsafe.Pointer(in.Region)) + out.SecretRef = in.SecretRef + return nil +} + +// Convert_v1alpha1_SeedBackup_To_core_SeedBackup is an autogenerated conversion function. +func Convert_v1alpha1_SeedBackup_To_core_SeedBackup(in *SeedBackup, out *core.SeedBackup, s conversion.Scope) error { + return autoConvert_v1alpha1_SeedBackup_To_core_SeedBackup(in, out, s) +} + +func autoConvert_core_SeedBackup_To_v1alpha1_SeedBackup(in *core.SeedBackup, out *SeedBackup, s conversion.Scope) error { + out.Provider = in.Provider + out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig)) + out.Region = (*string)(unsafe.Pointer(in.Region)) + out.SecretRef = in.SecretRef + return nil +} + +// Convert_core_SeedBackup_To_v1alpha1_SeedBackup is an autogenerated conversion function. +func Convert_core_SeedBackup_To_v1alpha1_SeedBackup(in *core.SeedBackup, out *SeedBackup, s conversion.Scope) error { + return autoConvert_core_SeedBackup_To_v1alpha1_SeedBackup(in, out, s) +} + +func autoConvert_v1alpha1_SeedDNS_To_core_SeedDNS(in *SeedDNS, out *core.SeedDNS, s conversion.Scope) error { + out.IngressDomain = (*string)(unsafe.Pointer(in.IngressDomain)) + out.Provider = (*core.SeedDNSProvider)(unsafe.Pointer(in.Provider)) + return nil +} + +// Convert_v1alpha1_SeedDNS_To_core_SeedDNS is an autogenerated conversion function. +func Convert_v1alpha1_SeedDNS_To_core_SeedDNS(in *SeedDNS, out *core.SeedDNS, s conversion.Scope) error { + return autoConvert_v1alpha1_SeedDNS_To_core_SeedDNS(in, out, s) +} + +func autoConvert_core_SeedDNS_To_v1alpha1_SeedDNS(in *core.SeedDNS, out *SeedDNS, s conversion.Scope) error { + out.IngressDomain = (*string)(unsafe.Pointer(in.IngressDomain)) + out.Provider = (*SeedDNSProvider)(unsafe.Pointer(in.Provider)) + return nil +} + +// Convert_core_SeedDNS_To_v1alpha1_SeedDNS is an autogenerated conversion function. 
+func Convert_core_SeedDNS_To_v1alpha1_SeedDNS(in *core.SeedDNS, out *SeedDNS, s conversion.Scope) error { + return autoConvert_core_SeedDNS_To_v1alpha1_SeedDNS(in, out, s) +} + +func autoConvert_v1alpha1_SeedDNSProvider_To_core_SeedDNSProvider(in *SeedDNSProvider, out *core.SeedDNSProvider, s conversion.Scope) error { + out.Type = in.Type + out.SecretRef = in.SecretRef + out.Domains = (*core.DNSIncludeExclude)(unsafe.Pointer(in.Domains)) + out.Zones = (*core.DNSIncludeExclude)(unsafe.Pointer(in.Zones)) + return nil +} + +// Convert_v1alpha1_SeedDNSProvider_To_core_SeedDNSProvider is an autogenerated conversion function. +func Convert_v1alpha1_SeedDNSProvider_To_core_SeedDNSProvider(in *SeedDNSProvider, out *core.SeedDNSProvider, s conversion.Scope) error { + return autoConvert_v1alpha1_SeedDNSProvider_To_core_SeedDNSProvider(in, out, s) +} + +func autoConvert_core_SeedDNSProvider_To_v1alpha1_SeedDNSProvider(in *core.SeedDNSProvider, out *SeedDNSProvider, s conversion.Scope) error { + out.Type = in.Type + out.SecretRef = in.SecretRef + out.Domains = (*DNSIncludeExclude)(unsafe.Pointer(in.Domains)) + out.Zones = (*DNSIncludeExclude)(unsafe.Pointer(in.Zones)) + return nil +} + +// Convert_core_SeedDNSProvider_To_v1alpha1_SeedDNSProvider is an autogenerated conversion function. +func Convert_core_SeedDNSProvider_To_v1alpha1_SeedDNSProvider(in *core.SeedDNSProvider, out *SeedDNSProvider, s conversion.Scope) error { + return autoConvert_core_SeedDNSProvider_To_v1alpha1_SeedDNSProvider(in, out, s) +} + +func autoConvert_v1alpha1_SeedList_To_core_SeedList(in *SeedList, out *core.SeedList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]core.Seed, len(*in)) + for i := range *in { + if err := Convert_v1alpha1_Seed_To_core_Seed(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_v1alpha1_SeedList_To_core_SeedList is an autogenerated conversion function. +func Convert_v1alpha1_SeedList_To_core_SeedList(in *SeedList, out *core.SeedList, s conversion.Scope) error { + return autoConvert_v1alpha1_SeedList_To_core_SeedList(in, out, s) +} + +func autoConvert_core_SeedList_To_v1alpha1_SeedList(in *core.SeedList, out *SeedList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Seed, len(*in)) + for i := range *in { + if err := Convert_core_Seed_To_v1alpha1_Seed(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_core_SeedList_To_v1alpha1_SeedList is an autogenerated conversion function. 
+func Convert_core_SeedList_To_v1alpha1_SeedList(in *core.SeedList, out *SeedList, s conversion.Scope) error { + return autoConvert_core_SeedList_To_v1alpha1_SeedList(in, out, s) +} + +func autoConvert_v1alpha1_SeedNetworks_To_core_SeedNetworks(in *SeedNetworks, out *core.SeedNetworks, s conversion.Scope) error { + out.Nodes = (*string)(unsafe.Pointer(in.Nodes)) + out.Pods = in.Pods + out.Services = in.Services + out.ShootDefaults = (*core.ShootNetworks)(unsafe.Pointer(in.ShootDefaults)) + return nil +} + +func autoConvert_core_SeedNetworks_To_v1alpha1_SeedNetworks(in *core.SeedNetworks, out *SeedNetworks, s conversion.Scope) error { + out.Nodes = (*string)(unsafe.Pointer(in.Nodes)) + out.Pods = in.Pods + out.Services = in.Services + out.ShootDefaults = (*ShootNetworks)(unsafe.Pointer(in.ShootDefaults)) + // WARNING: in.BlockCIDRs requires manual conversion: does not exist in peer-type + return nil +} + +func autoConvert_v1alpha1_SeedProvider_To_core_SeedProvider(in *SeedProvider, out *core.SeedProvider, s conversion.Scope) error { + out.Type = in.Type + out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig)) + out.Region = in.Region + return nil +} + +// Convert_v1alpha1_SeedProvider_To_core_SeedProvider is an autogenerated conversion function. +func Convert_v1alpha1_SeedProvider_To_core_SeedProvider(in *SeedProvider, out *core.SeedProvider, s conversion.Scope) error { + return autoConvert_v1alpha1_SeedProvider_To_core_SeedProvider(in, out, s) +} + +func autoConvert_core_SeedProvider_To_v1alpha1_SeedProvider(in *core.SeedProvider, out *SeedProvider, s conversion.Scope) error { + out.Type = in.Type + out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig)) + out.Region = in.Region + return nil +} + +// Convert_core_SeedProvider_To_v1alpha1_SeedProvider is an autogenerated conversion function. +func Convert_core_SeedProvider_To_v1alpha1_SeedProvider(in *core.SeedProvider, out *SeedProvider, s conversion.Scope) error { + return autoConvert_core_SeedProvider_To_v1alpha1_SeedProvider(in, out, s) +} + +func autoConvert_v1alpha1_SeedSelector_To_core_SeedSelector(in *SeedSelector, out *core.SeedSelector, s conversion.Scope) error { + out.LabelSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.LabelSelector)) + out.ProviderTypes = *(*[]string)(unsafe.Pointer(&in.ProviderTypes)) + return nil +} + +// Convert_v1alpha1_SeedSelector_To_core_SeedSelector is an autogenerated conversion function. +func Convert_v1alpha1_SeedSelector_To_core_SeedSelector(in *SeedSelector, out *core.SeedSelector, s conversion.Scope) error { + return autoConvert_v1alpha1_SeedSelector_To_core_SeedSelector(in, out, s) +} + +func autoConvert_core_SeedSelector_To_v1alpha1_SeedSelector(in *core.SeedSelector, out *SeedSelector, s conversion.Scope) error { + out.LabelSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.LabelSelector)) + out.ProviderTypes = *(*[]string)(unsafe.Pointer(&in.ProviderTypes)) + return nil +} + +// Convert_core_SeedSelector_To_v1alpha1_SeedSelector is an autogenerated conversion function. 
+func Convert_core_SeedSelector_To_v1alpha1_SeedSelector(in *core.SeedSelector, out *SeedSelector, s conversion.Scope) error { + return autoConvert_core_SeedSelector_To_v1alpha1_SeedSelector(in, out, s) +} + +func autoConvert_v1alpha1_SeedSettingExcessCapacityReservation_To_core_SeedSettingExcessCapacityReservation(in *SeedSettingExcessCapacityReservation, out *core.SeedSettingExcessCapacityReservation, s conversion.Scope) error { + out.Enabled = in.Enabled + return nil +} + +// Convert_v1alpha1_SeedSettingExcessCapacityReservation_To_core_SeedSettingExcessCapacityReservation is an autogenerated conversion function. +func Convert_v1alpha1_SeedSettingExcessCapacityReservation_To_core_SeedSettingExcessCapacityReservation(in *SeedSettingExcessCapacityReservation, out *core.SeedSettingExcessCapacityReservation, s conversion.Scope) error { + return autoConvert_v1alpha1_SeedSettingExcessCapacityReservation_To_core_SeedSettingExcessCapacityReservation(in, out, s) +} + +func autoConvert_core_SeedSettingExcessCapacityReservation_To_v1alpha1_SeedSettingExcessCapacityReservation(in *core.SeedSettingExcessCapacityReservation, out *SeedSettingExcessCapacityReservation, s conversion.Scope) error { + out.Enabled = in.Enabled + return nil +} + +// Convert_core_SeedSettingExcessCapacityReservation_To_v1alpha1_SeedSettingExcessCapacityReservation is an autogenerated conversion function. +func Convert_core_SeedSettingExcessCapacityReservation_To_v1alpha1_SeedSettingExcessCapacityReservation(in *core.SeedSettingExcessCapacityReservation, out *SeedSettingExcessCapacityReservation, s conversion.Scope) error { + return autoConvert_core_SeedSettingExcessCapacityReservation_To_v1alpha1_SeedSettingExcessCapacityReservation(in, out, s) +} + +func autoConvert_v1alpha1_SeedSettingLoadBalancerServices_To_core_SeedSettingLoadBalancerServices(in *SeedSettingLoadBalancerServices, out *core.SeedSettingLoadBalancerServices, s conversion.Scope) error { + out.Annotations = *(*map[string]string)(unsafe.Pointer(&in.Annotations)) + return nil +} + +// Convert_v1alpha1_SeedSettingLoadBalancerServices_To_core_SeedSettingLoadBalancerServices is an autogenerated conversion function. +func Convert_v1alpha1_SeedSettingLoadBalancerServices_To_core_SeedSettingLoadBalancerServices(in *SeedSettingLoadBalancerServices, out *core.SeedSettingLoadBalancerServices, s conversion.Scope) error { + return autoConvert_v1alpha1_SeedSettingLoadBalancerServices_To_core_SeedSettingLoadBalancerServices(in, out, s) +} + +func autoConvert_core_SeedSettingLoadBalancerServices_To_v1alpha1_SeedSettingLoadBalancerServices(in *core.SeedSettingLoadBalancerServices, out *SeedSettingLoadBalancerServices, s conversion.Scope) error { + out.Annotations = *(*map[string]string)(unsafe.Pointer(&in.Annotations)) + return nil +} + +// Convert_core_SeedSettingLoadBalancerServices_To_v1alpha1_SeedSettingLoadBalancerServices is an autogenerated conversion function. 
+func Convert_core_SeedSettingLoadBalancerServices_To_v1alpha1_SeedSettingLoadBalancerServices(in *core.SeedSettingLoadBalancerServices, out *SeedSettingLoadBalancerServices, s conversion.Scope) error { + return autoConvert_core_SeedSettingLoadBalancerServices_To_v1alpha1_SeedSettingLoadBalancerServices(in, out, s) +} + +func autoConvert_v1alpha1_SeedSettingScheduling_To_core_SeedSettingScheduling(in *SeedSettingScheduling, out *core.SeedSettingScheduling, s conversion.Scope) error { + out.Visible = in.Visible + return nil +} + +// Convert_v1alpha1_SeedSettingScheduling_To_core_SeedSettingScheduling is an autogenerated conversion function. +func Convert_v1alpha1_SeedSettingScheduling_To_core_SeedSettingScheduling(in *SeedSettingScheduling, out *core.SeedSettingScheduling, s conversion.Scope) error { + return autoConvert_v1alpha1_SeedSettingScheduling_To_core_SeedSettingScheduling(in, out, s) +} + +func autoConvert_core_SeedSettingScheduling_To_v1alpha1_SeedSettingScheduling(in *core.SeedSettingScheduling, out *SeedSettingScheduling, s conversion.Scope) error { + out.Visible = in.Visible + return nil +} + +// Convert_core_SeedSettingScheduling_To_v1alpha1_SeedSettingScheduling is an autogenerated conversion function. +func Convert_core_SeedSettingScheduling_To_v1alpha1_SeedSettingScheduling(in *core.SeedSettingScheduling, out *SeedSettingScheduling, s conversion.Scope) error { + return autoConvert_core_SeedSettingScheduling_To_v1alpha1_SeedSettingScheduling(in, out, s) +} + +func autoConvert_v1alpha1_SeedSettingShootDNS_To_core_SeedSettingShootDNS(in *SeedSettingShootDNS, out *core.SeedSettingShootDNS, s conversion.Scope) error { + out.Enabled = in.Enabled + return nil +} + +// Convert_v1alpha1_SeedSettingShootDNS_To_core_SeedSettingShootDNS is an autogenerated conversion function. +func Convert_v1alpha1_SeedSettingShootDNS_To_core_SeedSettingShootDNS(in *SeedSettingShootDNS, out *core.SeedSettingShootDNS, s conversion.Scope) error { + return autoConvert_v1alpha1_SeedSettingShootDNS_To_core_SeedSettingShootDNS(in, out, s) +} + +func autoConvert_core_SeedSettingShootDNS_To_v1alpha1_SeedSettingShootDNS(in *core.SeedSettingShootDNS, out *SeedSettingShootDNS, s conversion.Scope) error { + out.Enabled = in.Enabled + return nil +} + +// Convert_core_SeedSettingShootDNS_To_v1alpha1_SeedSettingShootDNS is an autogenerated conversion function. +func Convert_core_SeedSettingShootDNS_To_v1alpha1_SeedSettingShootDNS(in *core.SeedSettingShootDNS, out *SeedSettingShootDNS, s conversion.Scope) error { + return autoConvert_core_SeedSettingShootDNS_To_v1alpha1_SeedSettingShootDNS(in, out, s) +} + +func autoConvert_v1alpha1_SeedSettingVerticalPodAutoscaler_To_core_SeedSettingVerticalPodAutoscaler(in *SeedSettingVerticalPodAutoscaler, out *core.SeedSettingVerticalPodAutoscaler, s conversion.Scope) error { + out.Enabled = in.Enabled + return nil +} + +// Convert_v1alpha1_SeedSettingVerticalPodAutoscaler_To_core_SeedSettingVerticalPodAutoscaler is an autogenerated conversion function. 
+func Convert_v1alpha1_SeedSettingVerticalPodAutoscaler_To_core_SeedSettingVerticalPodAutoscaler(in *SeedSettingVerticalPodAutoscaler, out *core.SeedSettingVerticalPodAutoscaler, s conversion.Scope) error { + return autoConvert_v1alpha1_SeedSettingVerticalPodAutoscaler_To_core_SeedSettingVerticalPodAutoscaler(in, out, s) +} + +func autoConvert_core_SeedSettingVerticalPodAutoscaler_To_v1alpha1_SeedSettingVerticalPodAutoscaler(in *core.SeedSettingVerticalPodAutoscaler, out *SeedSettingVerticalPodAutoscaler, s conversion.Scope) error { + out.Enabled = in.Enabled + return nil +} + +// Convert_core_SeedSettingVerticalPodAutoscaler_To_v1alpha1_SeedSettingVerticalPodAutoscaler is an autogenerated conversion function. +func Convert_core_SeedSettingVerticalPodAutoscaler_To_v1alpha1_SeedSettingVerticalPodAutoscaler(in *core.SeedSettingVerticalPodAutoscaler, out *SeedSettingVerticalPodAutoscaler, s conversion.Scope) error { + return autoConvert_core_SeedSettingVerticalPodAutoscaler_To_v1alpha1_SeedSettingVerticalPodAutoscaler(in, out, s) +} + +func autoConvert_v1alpha1_SeedSettings_To_core_SeedSettings(in *SeedSettings, out *core.SeedSettings, s conversion.Scope) error { + out.ExcessCapacityReservation = (*core.SeedSettingExcessCapacityReservation)(unsafe.Pointer(in.ExcessCapacityReservation)) + out.Scheduling = (*core.SeedSettingScheduling)(unsafe.Pointer(in.Scheduling)) + out.ShootDNS = (*core.SeedSettingShootDNS)(unsafe.Pointer(in.ShootDNS)) + out.LoadBalancerServices = (*core.SeedSettingLoadBalancerServices)(unsafe.Pointer(in.LoadBalancerServices)) + out.VerticalPodAutoscaler = (*core.SeedSettingVerticalPodAutoscaler)(unsafe.Pointer(in.VerticalPodAutoscaler)) + return nil +} + +// Convert_v1alpha1_SeedSettings_To_core_SeedSettings is an autogenerated conversion function. +func Convert_v1alpha1_SeedSettings_To_core_SeedSettings(in *SeedSettings, out *core.SeedSettings, s conversion.Scope) error { + return autoConvert_v1alpha1_SeedSettings_To_core_SeedSettings(in, out, s) +} + +func autoConvert_core_SeedSettings_To_v1alpha1_SeedSettings(in *core.SeedSettings, out *SeedSettings, s conversion.Scope) error { + out.ExcessCapacityReservation = (*SeedSettingExcessCapacityReservation)(unsafe.Pointer(in.ExcessCapacityReservation)) + out.Scheduling = (*SeedSettingScheduling)(unsafe.Pointer(in.Scheduling)) + out.ShootDNS = (*SeedSettingShootDNS)(unsafe.Pointer(in.ShootDNS)) + out.LoadBalancerServices = (*SeedSettingLoadBalancerServices)(unsafe.Pointer(in.LoadBalancerServices)) + out.VerticalPodAutoscaler = (*SeedSettingVerticalPodAutoscaler)(unsafe.Pointer(in.VerticalPodAutoscaler)) + return nil +} + +// Convert_core_SeedSettings_To_v1alpha1_SeedSettings is an autogenerated conversion function. 
+func Convert_core_SeedSettings_To_v1alpha1_SeedSettings(in *core.SeedSettings, out *SeedSettings, s conversion.Scope) error { + return autoConvert_core_SeedSettings_To_v1alpha1_SeedSettings(in, out, s) +} + +func autoConvert_v1alpha1_SeedSpec_To_core_SeedSpec(in *SeedSpec, out *core.SeedSpec, s conversion.Scope) error { + out.Backup = (*core.SeedBackup)(unsafe.Pointer(in.Backup)) + // WARNING: in.BlockCIDRs requires manual conversion: does not exist in peer-type + if err := Convert_v1alpha1_SeedDNS_To_core_SeedDNS(&in.DNS, &out.DNS, s); err != nil { + return err + } + if err := Convert_v1alpha1_SeedNetworks_To_core_SeedNetworks(&in.Networks, &out.Networks, s); err != nil { + return err + } + if err := Convert_v1alpha1_SeedProvider_To_core_SeedProvider(&in.Provider, &out.Provider, s); err != nil { + return err + } + out.SecretRef = (*v1.SecretReference)(unsafe.Pointer(in.SecretRef)) + out.Taints = *(*[]core.SeedTaint)(unsafe.Pointer(&in.Taints)) + out.Volume = (*core.SeedVolume)(unsafe.Pointer(in.Volume)) + out.Settings = (*core.SeedSettings)(unsafe.Pointer(in.Settings)) + out.Ingress = (*core.Ingress)(unsafe.Pointer(in.Ingress)) + return nil +} + +func autoConvert_core_SeedSpec_To_v1alpha1_SeedSpec(in *core.SeedSpec, out *SeedSpec, s conversion.Scope) error { + out.Backup = (*SeedBackup)(unsafe.Pointer(in.Backup)) + if err := Convert_core_SeedDNS_To_v1alpha1_SeedDNS(&in.DNS, &out.DNS, s); err != nil { + return err + } + if err := Convert_core_SeedNetworks_To_v1alpha1_SeedNetworks(&in.Networks, &out.Networks, s); err != nil { + return err + } + if err := Convert_core_SeedProvider_To_v1alpha1_SeedProvider(&in.Provider, &out.Provider, s); err != nil { + return err + } + out.SecretRef = (*v1.SecretReference)(unsafe.Pointer(in.SecretRef)) + out.Settings = (*SeedSettings)(unsafe.Pointer(in.Settings)) + out.Taints = *(*[]SeedTaint)(unsafe.Pointer(&in.Taints)) + out.Volume = (*SeedVolume)(unsafe.Pointer(in.Volume)) + out.Ingress = (*Ingress)(unsafe.Pointer(in.Ingress)) + return nil +} + +func autoConvert_v1alpha1_SeedStatus_To_core_SeedStatus(in *SeedStatus, out *core.SeedStatus, s conversion.Scope) error { + out.Conditions = *(*[]core.Condition)(unsafe.Pointer(&in.Conditions)) + out.Gardener = (*core.Gardener)(unsafe.Pointer(in.Gardener)) + out.KubernetesVersion = (*string)(unsafe.Pointer(in.KubernetesVersion)) + out.ObservedGeneration = in.ObservedGeneration + out.ClusterIdentity = (*string)(unsafe.Pointer(in.ClusterIdentity)) + return nil +} + +// Convert_v1alpha1_SeedStatus_To_core_SeedStatus is an autogenerated conversion function. 
+func Convert_v1alpha1_SeedStatus_To_core_SeedStatus(in *SeedStatus, out *core.SeedStatus, s conversion.Scope) error { + return autoConvert_v1alpha1_SeedStatus_To_core_SeedStatus(in, out, s) +} + +func autoConvert_core_SeedStatus_To_v1alpha1_SeedStatus(in *core.SeedStatus, out *SeedStatus, s conversion.Scope) error { + out.Gardener = (*Gardener)(unsafe.Pointer(in.Gardener)) + out.KubernetesVersion = (*string)(unsafe.Pointer(in.KubernetesVersion)) + out.Conditions = *(*[]Condition)(unsafe.Pointer(&in.Conditions)) + out.ObservedGeneration = in.ObservedGeneration + out.ClusterIdentity = (*string)(unsafe.Pointer(in.ClusterIdentity)) + // WARNING: in.Capacity requires manual conversion: does not exist in peer-type + // WARNING: in.Allocatable requires manual conversion: does not exist in peer-type + return nil +} + +func autoConvert_v1alpha1_SeedTaint_To_core_SeedTaint(in *SeedTaint, out *core.SeedTaint, s conversion.Scope) error { + out.Key = in.Key + out.Value = (*string)(unsafe.Pointer(in.Value)) + return nil +} + +// Convert_v1alpha1_SeedTaint_To_core_SeedTaint is an autogenerated conversion function. +func Convert_v1alpha1_SeedTaint_To_core_SeedTaint(in *SeedTaint, out *core.SeedTaint, s conversion.Scope) error { + return autoConvert_v1alpha1_SeedTaint_To_core_SeedTaint(in, out, s) +} + +func autoConvert_core_SeedTaint_To_v1alpha1_SeedTaint(in *core.SeedTaint, out *SeedTaint, s conversion.Scope) error { + out.Key = in.Key + out.Value = (*string)(unsafe.Pointer(in.Value)) + return nil +} + +// Convert_core_SeedTaint_To_v1alpha1_SeedTaint is an autogenerated conversion function. +func Convert_core_SeedTaint_To_v1alpha1_SeedTaint(in *core.SeedTaint, out *SeedTaint, s conversion.Scope) error { + return autoConvert_core_SeedTaint_To_v1alpha1_SeedTaint(in, out, s) +} + +func autoConvert_v1alpha1_SeedVolume_To_core_SeedVolume(in *SeedVolume, out *core.SeedVolume, s conversion.Scope) error { + out.MinimumSize = (*resource.Quantity)(unsafe.Pointer(in.MinimumSize)) + out.Providers = *(*[]core.SeedVolumeProvider)(unsafe.Pointer(&in.Providers)) + return nil +} + +// Convert_v1alpha1_SeedVolume_To_core_SeedVolume is an autogenerated conversion function. +func Convert_v1alpha1_SeedVolume_To_core_SeedVolume(in *SeedVolume, out *core.SeedVolume, s conversion.Scope) error { + return autoConvert_v1alpha1_SeedVolume_To_core_SeedVolume(in, out, s) +} + +func autoConvert_core_SeedVolume_To_v1alpha1_SeedVolume(in *core.SeedVolume, out *SeedVolume, s conversion.Scope) error { + out.MinimumSize = (*resource.Quantity)(unsafe.Pointer(in.MinimumSize)) + out.Providers = *(*[]SeedVolumeProvider)(unsafe.Pointer(&in.Providers)) + return nil +} + +// Convert_core_SeedVolume_To_v1alpha1_SeedVolume is an autogenerated conversion function. +func Convert_core_SeedVolume_To_v1alpha1_SeedVolume(in *core.SeedVolume, out *SeedVolume, s conversion.Scope) error { + return autoConvert_core_SeedVolume_To_v1alpha1_SeedVolume(in, out, s) +} + +func autoConvert_v1alpha1_SeedVolumeProvider_To_core_SeedVolumeProvider(in *SeedVolumeProvider, out *core.SeedVolumeProvider, s conversion.Scope) error { + out.Purpose = in.Purpose + out.Name = in.Name + return nil +} + +// Convert_v1alpha1_SeedVolumeProvider_To_core_SeedVolumeProvider is an autogenerated conversion function. 
+func Convert_v1alpha1_SeedVolumeProvider_To_core_SeedVolumeProvider(in *SeedVolumeProvider, out *core.SeedVolumeProvider, s conversion.Scope) error { + return autoConvert_v1alpha1_SeedVolumeProvider_To_core_SeedVolumeProvider(in, out, s) +} + +func autoConvert_core_SeedVolumeProvider_To_v1alpha1_SeedVolumeProvider(in *core.SeedVolumeProvider, out *SeedVolumeProvider, s conversion.Scope) error { + out.Purpose = in.Purpose + out.Name = in.Name + return nil +} + +// Convert_core_SeedVolumeProvider_To_v1alpha1_SeedVolumeProvider is an autogenerated conversion function. +func Convert_core_SeedVolumeProvider_To_v1alpha1_SeedVolumeProvider(in *core.SeedVolumeProvider, out *SeedVolumeProvider, s conversion.Scope) error { + return autoConvert_core_SeedVolumeProvider_To_v1alpha1_SeedVolumeProvider(in, out, s) +} + +func autoConvert_v1alpha1_ServiceAccountConfig_To_core_ServiceAccountConfig(in *ServiceAccountConfig, out *core.ServiceAccountConfig, s conversion.Scope) error { + out.Issuer = (*string)(unsafe.Pointer(in.Issuer)) + out.SigningKeySecret = (*v1.LocalObjectReference)(unsafe.Pointer(in.SigningKeySecret)) + return nil +} + +// Convert_v1alpha1_ServiceAccountConfig_To_core_ServiceAccountConfig is an autogenerated conversion function. +func Convert_v1alpha1_ServiceAccountConfig_To_core_ServiceAccountConfig(in *ServiceAccountConfig, out *core.ServiceAccountConfig, s conversion.Scope) error { + return autoConvert_v1alpha1_ServiceAccountConfig_To_core_ServiceAccountConfig(in, out, s) +} + +func autoConvert_core_ServiceAccountConfig_To_v1alpha1_ServiceAccountConfig(in *core.ServiceAccountConfig, out *ServiceAccountConfig, s conversion.Scope) error { + out.Issuer = (*string)(unsafe.Pointer(in.Issuer)) + out.SigningKeySecret = (*v1.LocalObjectReference)(unsafe.Pointer(in.SigningKeySecret)) + return nil +} + +// Convert_core_ServiceAccountConfig_To_v1alpha1_ServiceAccountConfig is an autogenerated conversion function. +func Convert_core_ServiceAccountConfig_To_v1alpha1_ServiceAccountConfig(in *core.ServiceAccountConfig, out *ServiceAccountConfig, s conversion.Scope) error { + return autoConvert_core_ServiceAccountConfig_To_v1alpha1_ServiceAccountConfig(in, out, s) +} + +func autoConvert_v1alpha1_Shoot_To_core_Shoot(in *Shoot, out *core.Shoot, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha1_ShootSpec_To_core_ShootSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1alpha1_ShootStatus_To_core_ShootStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha1_Shoot_To_core_Shoot is an autogenerated conversion function. +func Convert_v1alpha1_Shoot_To_core_Shoot(in *Shoot, out *core.Shoot, s conversion.Scope) error { + return autoConvert_v1alpha1_Shoot_To_core_Shoot(in, out, s) +} + +func autoConvert_core_Shoot_To_v1alpha1_Shoot(in *core.Shoot, out *Shoot, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_ShootSpec_To_v1alpha1_ShootSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_core_ShootStatus_To_v1alpha1_ShootStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_core_Shoot_To_v1alpha1_Shoot is an autogenerated conversion function. 
+func Convert_core_Shoot_To_v1alpha1_Shoot(in *core.Shoot, out *Shoot, s conversion.Scope) error { + return autoConvert_core_Shoot_To_v1alpha1_Shoot(in, out, s) +} + +func autoConvert_v1alpha1_ShootList_To_core_ShootList(in *ShootList, out *core.ShootList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]core.Shoot, len(*in)) + for i := range *in { + if err := Convert_v1alpha1_Shoot_To_core_Shoot(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_v1alpha1_ShootList_To_core_ShootList is an autogenerated conversion function. +func Convert_v1alpha1_ShootList_To_core_ShootList(in *ShootList, out *core.ShootList, s conversion.Scope) error { + return autoConvert_v1alpha1_ShootList_To_core_ShootList(in, out, s) +} + +func autoConvert_core_ShootList_To_v1alpha1_ShootList(in *core.ShootList, out *ShootList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Shoot, len(*in)) + for i := range *in { + if err := Convert_core_Shoot_To_v1alpha1_Shoot(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_core_ShootList_To_v1alpha1_ShootList is an autogenerated conversion function. +func Convert_core_ShootList_To_v1alpha1_ShootList(in *core.ShootList, out *ShootList, s conversion.Scope) error { + return autoConvert_core_ShootList_To_v1alpha1_ShootList(in, out, s) +} + +func autoConvert_v1alpha1_ShootMachineImage_To_core_ShootMachineImage(in *ShootMachineImage, out *core.ShootMachineImage, s conversion.Scope) error { + out.Name = in.Name + out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig)) + if err := metav1.Convert_Pointer_string_To_string(&in.Version, &out.Version, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha1_ShootMachineImage_To_core_ShootMachineImage is an autogenerated conversion function. +func Convert_v1alpha1_ShootMachineImage_To_core_ShootMachineImage(in *ShootMachineImage, out *core.ShootMachineImage, s conversion.Scope) error { + return autoConvert_v1alpha1_ShootMachineImage_To_core_ShootMachineImage(in, out, s) +} + +func autoConvert_core_ShootMachineImage_To_v1alpha1_ShootMachineImage(in *core.ShootMachineImage, out *ShootMachineImage, s conversion.Scope) error { + out.Name = in.Name + out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig)) + if err := metav1.Convert_string_To_Pointer_string(&in.Version, &out.Version, s); err != nil { + return err + } + return nil +} + +// Convert_core_ShootMachineImage_To_v1alpha1_ShootMachineImage is an autogenerated conversion function. +func Convert_core_ShootMachineImage_To_v1alpha1_ShootMachineImage(in *core.ShootMachineImage, out *ShootMachineImage, s conversion.Scope) error { + return autoConvert_core_ShootMachineImage_To_v1alpha1_ShootMachineImage(in, out, s) +} + +func autoConvert_v1alpha1_ShootNetworks_To_core_ShootNetworks(in *ShootNetworks, out *core.ShootNetworks, s conversion.Scope) error { + out.Pods = (*string)(unsafe.Pointer(in.Pods)) + out.Services = (*string)(unsafe.Pointer(in.Services)) + return nil +} + +// Convert_v1alpha1_ShootNetworks_To_core_ShootNetworks is an autogenerated conversion function. 
+func Convert_v1alpha1_ShootNetworks_To_core_ShootNetworks(in *ShootNetworks, out *core.ShootNetworks, s conversion.Scope) error { + return autoConvert_v1alpha1_ShootNetworks_To_core_ShootNetworks(in, out, s) +} + +func autoConvert_core_ShootNetworks_To_v1alpha1_ShootNetworks(in *core.ShootNetworks, out *ShootNetworks, s conversion.Scope) error { + out.Pods = (*string)(unsafe.Pointer(in.Pods)) + out.Services = (*string)(unsafe.Pointer(in.Services)) + return nil +} + +// Convert_core_ShootNetworks_To_v1alpha1_ShootNetworks is an autogenerated conversion function. +func Convert_core_ShootNetworks_To_v1alpha1_ShootNetworks(in *core.ShootNetworks, out *ShootNetworks, s conversion.Scope) error { + return autoConvert_core_ShootNetworks_To_v1alpha1_ShootNetworks(in, out, s) +} + +func autoConvert_v1alpha1_ShootSpec_To_core_ShootSpec(in *ShootSpec, out *core.ShootSpec, s conversion.Scope) error { + out.Addons = (*core.Addons)(unsafe.Pointer(in.Addons)) + out.CloudProfileName = in.CloudProfileName + out.DNS = (*core.DNS)(unsafe.Pointer(in.DNS)) + out.Extensions = *(*[]core.Extension)(unsafe.Pointer(&in.Extensions)) + out.Hibernation = (*core.Hibernation)(unsafe.Pointer(in.Hibernation)) + if err := Convert_v1alpha1_Kubernetes_To_core_Kubernetes(&in.Kubernetes, &out.Kubernetes, s); err != nil { + return err + } + if err := Convert_v1alpha1_Networking_To_core_Networking(&in.Networking, &out.Networking, s); err != nil { + return err + } + out.Maintenance = (*core.Maintenance)(unsafe.Pointer(in.Maintenance)) + out.Monitoring = (*core.Monitoring)(unsafe.Pointer(in.Monitoring)) + if err := Convert_v1alpha1_Provider_To_core_Provider(&in.Provider, &out.Provider, s); err != nil { + return err + } + out.Purpose = (*core.ShootPurpose)(unsafe.Pointer(in.Purpose)) + out.Region = in.Region + out.SecretBindingName = in.SecretBindingName + out.SeedName = (*string)(unsafe.Pointer(in.SeedName)) + out.SeedSelector = (*core.SeedSelector)(unsafe.Pointer(in.SeedSelector)) + out.Resources = *(*[]core.NamedResourceReference)(unsafe.Pointer(&in.Resources)) + out.Tolerations = *(*[]core.Toleration)(unsafe.Pointer(&in.Tolerations)) + return nil +} + +// Convert_v1alpha1_ShootSpec_To_core_ShootSpec is an autogenerated conversion function. 
+func Convert_v1alpha1_ShootSpec_To_core_ShootSpec(in *ShootSpec, out *core.ShootSpec, s conversion.Scope) error { + return autoConvert_v1alpha1_ShootSpec_To_core_ShootSpec(in, out, s) +} + +func autoConvert_core_ShootSpec_To_v1alpha1_ShootSpec(in *core.ShootSpec, out *ShootSpec, s conversion.Scope) error { + out.Addons = (*Addons)(unsafe.Pointer(in.Addons)) + out.CloudProfileName = in.CloudProfileName + out.DNS = (*DNS)(unsafe.Pointer(in.DNS)) + out.Extensions = *(*[]Extension)(unsafe.Pointer(&in.Extensions)) + out.Hibernation = (*Hibernation)(unsafe.Pointer(in.Hibernation)) + if err := Convert_core_Kubernetes_To_v1alpha1_Kubernetes(&in.Kubernetes, &out.Kubernetes, s); err != nil { + return err + } + if err := Convert_core_Networking_To_v1alpha1_Networking(&in.Networking, &out.Networking, s); err != nil { + return err + } + out.Maintenance = (*Maintenance)(unsafe.Pointer(in.Maintenance)) + out.Monitoring = (*Monitoring)(unsafe.Pointer(in.Monitoring)) + if err := Convert_core_Provider_To_v1alpha1_Provider(&in.Provider, &out.Provider, s); err != nil { + return err + } + out.Purpose = (*ShootPurpose)(unsafe.Pointer(in.Purpose)) + out.Region = in.Region + out.SecretBindingName = in.SecretBindingName + out.SeedName = (*string)(unsafe.Pointer(in.SeedName)) + out.SeedSelector = (*SeedSelector)(unsafe.Pointer(in.SeedSelector)) + out.Resources = *(*[]NamedResourceReference)(unsafe.Pointer(&in.Resources)) + out.Tolerations = *(*[]Toleration)(unsafe.Pointer(&in.Tolerations)) + return nil +} + +// Convert_core_ShootSpec_To_v1alpha1_ShootSpec is an autogenerated conversion function. +func Convert_core_ShootSpec_To_v1alpha1_ShootSpec(in *core.ShootSpec, out *ShootSpec, s conversion.Scope) error { + return autoConvert_core_ShootSpec_To_v1alpha1_ShootSpec(in, out, s) +} + +func autoConvert_v1alpha1_ShootState_To_core_ShootState(in *ShootState, out *core.ShootState, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha1_ShootStateSpec_To_core_ShootStateSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha1_ShootState_To_core_ShootState is an autogenerated conversion function. +func Convert_v1alpha1_ShootState_To_core_ShootState(in *ShootState, out *core.ShootState, s conversion.Scope) error { + return autoConvert_v1alpha1_ShootState_To_core_ShootState(in, out, s) +} + +func autoConvert_core_ShootState_To_v1alpha1_ShootState(in *core.ShootState, out *ShootState, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_ShootStateSpec_To_v1alpha1_ShootStateSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +// Convert_core_ShootState_To_v1alpha1_ShootState is an autogenerated conversion function. +func Convert_core_ShootState_To_v1alpha1_ShootState(in *core.ShootState, out *ShootState, s conversion.Scope) error { + return autoConvert_core_ShootState_To_v1alpha1_ShootState(in, out, s) +} + +func autoConvert_v1alpha1_ShootStateList_To_core_ShootStateList(in *ShootStateList, out *core.ShootStateList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]core.ShootState)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha1_ShootStateList_To_core_ShootStateList is an autogenerated conversion function. 
+func Convert_v1alpha1_ShootStateList_To_core_ShootStateList(in *ShootStateList, out *core.ShootStateList, s conversion.Scope) error { + return autoConvert_v1alpha1_ShootStateList_To_core_ShootStateList(in, out, s) +} + +func autoConvert_core_ShootStateList_To_v1alpha1_ShootStateList(in *core.ShootStateList, out *ShootStateList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]ShootState)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_core_ShootStateList_To_v1alpha1_ShootStateList is an autogenerated conversion function. +func Convert_core_ShootStateList_To_v1alpha1_ShootStateList(in *core.ShootStateList, out *ShootStateList, s conversion.Scope) error { + return autoConvert_core_ShootStateList_To_v1alpha1_ShootStateList(in, out, s) +} + +func autoConvert_v1alpha1_ShootStateSpec_To_core_ShootStateSpec(in *ShootStateSpec, out *core.ShootStateSpec, s conversion.Scope) error { + out.Gardener = *(*[]core.GardenerResourceData)(unsafe.Pointer(&in.Gardener)) + out.Extensions = *(*[]core.ExtensionResourceState)(unsafe.Pointer(&in.Extensions)) + out.Resources = *(*[]core.ResourceData)(unsafe.Pointer(&in.Resources)) + return nil +} + +// Convert_v1alpha1_ShootStateSpec_To_core_ShootStateSpec is an autogenerated conversion function. +func Convert_v1alpha1_ShootStateSpec_To_core_ShootStateSpec(in *ShootStateSpec, out *core.ShootStateSpec, s conversion.Scope) error { + return autoConvert_v1alpha1_ShootStateSpec_To_core_ShootStateSpec(in, out, s) +} + +func autoConvert_core_ShootStateSpec_To_v1alpha1_ShootStateSpec(in *core.ShootStateSpec, out *ShootStateSpec, s conversion.Scope) error { + out.Gardener = *(*[]GardenerResourceData)(unsafe.Pointer(&in.Gardener)) + out.Extensions = *(*[]ExtensionResourceState)(unsafe.Pointer(&in.Extensions)) + out.Resources = *(*[]ResourceData)(unsafe.Pointer(&in.Resources)) + return nil +} + +// Convert_core_ShootStateSpec_To_v1alpha1_ShootStateSpec is an autogenerated conversion function. 
+func Convert_core_ShootStateSpec_To_v1alpha1_ShootStateSpec(in *core.ShootStateSpec, out *ShootStateSpec, s conversion.Scope) error { + return autoConvert_core_ShootStateSpec_To_v1alpha1_ShootStateSpec(in, out, s) +} + +func autoConvert_v1alpha1_ShootStatus_To_core_ShootStatus(in *ShootStatus, out *core.ShootStatus, s conversion.Scope) error { + out.Conditions = *(*[]core.Condition)(unsafe.Pointer(&in.Conditions)) + out.Constraints = *(*[]core.Condition)(unsafe.Pointer(&in.Constraints)) + if err := Convert_v1alpha1_Gardener_To_core_Gardener(&in.Gardener, &out.Gardener, s); err != nil { + return err + } + out.IsHibernated = in.IsHibernated + out.LastOperation = (*core.LastOperation)(unsafe.Pointer(in.LastOperation)) + // WARNING: in.LastError requires manual conversion: does not exist in peer-type + out.LastErrors = *(*[]core.LastError)(unsafe.Pointer(&in.LastErrors)) + out.ObservedGeneration = in.ObservedGeneration + out.RetryCycleStartTime = (*metav1.Time)(unsafe.Pointer(in.RetryCycleStartTime)) + // WARNING: in.Seed requires manual conversion: does not exist in peer-type + out.TechnicalID = in.TechnicalID + out.UID = types.UID(in.UID) + out.ClusterIdentity = (*string)(unsafe.Pointer(in.ClusterIdentity)) + return nil +} + +func autoConvert_core_ShootStatus_To_v1alpha1_ShootStatus(in *core.ShootStatus, out *ShootStatus, s conversion.Scope) error { + out.Conditions = *(*[]Condition)(unsafe.Pointer(&in.Conditions)) + out.Constraints = *(*[]Condition)(unsafe.Pointer(&in.Constraints)) + if err := Convert_core_Gardener_To_v1alpha1_Gardener(&in.Gardener, &out.Gardener, s); err != nil { + return err + } + out.IsHibernated = in.IsHibernated + out.LastOperation = (*LastOperation)(unsafe.Pointer(in.LastOperation)) + out.LastErrors = *(*[]LastError)(unsafe.Pointer(&in.LastErrors)) + out.ObservedGeneration = in.ObservedGeneration + out.RetryCycleStartTime = (*metav1.Time)(unsafe.Pointer(in.RetryCycleStartTime)) + // WARNING: in.SeedName requires manual conversion: does not exist in peer-type + out.TechnicalID = in.TechnicalID + out.UID = types.UID(in.UID) + out.ClusterIdentity = (*string)(unsafe.Pointer(in.ClusterIdentity)) + return nil +} + +func autoConvert_v1alpha1_Toleration_To_core_Toleration(in *Toleration, out *core.Toleration, s conversion.Scope) error { + out.Key = in.Key + out.Value = (*string)(unsafe.Pointer(in.Value)) + return nil +} + +// Convert_v1alpha1_Toleration_To_core_Toleration is an autogenerated conversion function. +func Convert_v1alpha1_Toleration_To_core_Toleration(in *Toleration, out *core.Toleration, s conversion.Scope) error { + return autoConvert_v1alpha1_Toleration_To_core_Toleration(in, out, s) +} + +func autoConvert_core_Toleration_To_v1alpha1_Toleration(in *core.Toleration, out *Toleration, s conversion.Scope) error { + out.Key = in.Key + out.Value = (*string)(unsafe.Pointer(in.Value)) + return nil +} + +// Convert_core_Toleration_To_v1alpha1_Toleration is an autogenerated conversion function. 
+func Convert_core_Toleration_To_v1alpha1_Toleration(in *core.Toleration, out *Toleration, s conversion.Scope) error { + return autoConvert_core_Toleration_To_v1alpha1_Toleration(in, out, s) +} + +func autoConvert_v1alpha1_VerticalPodAutoscaler_To_core_VerticalPodAutoscaler(in *VerticalPodAutoscaler, out *core.VerticalPodAutoscaler, s conversion.Scope) error { + out.Enabled = in.Enabled + out.EvictAfterOOMThreshold = (*metav1.Duration)(unsafe.Pointer(in.EvictAfterOOMThreshold)) + out.EvictionRateBurst = (*int32)(unsafe.Pointer(in.EvictionRateBurst)) + out.EvictionRateLimit = (*float64)(unsafe.Pointer(in.EvictionRateLimit)) + out.EvictionTolerance = (*float64)(unsafe.Pointer(in.EvictionTolerance)) + out.RecommendationMarginFraction = (*float64)(unsafe.Pointer(in.RecommendationMarginFraction)) + out.UpdaterInterval = (*metav1.Duration)(unsafe.Pointer(in.UpdaterInterval)) + out.RecommenderInterval = (*metav1.Duration)(unsafe.Pointer(in.RecommenderInterval)) + return nil +} + +// Convert_v1alpha1_VerticalPodAutoscaler_To_core_VerticalPodAutoscaler is an autogenerated conversion function. +func Convert_v1alpha1_VerticalPodAutoscaler_To_core_VerticalPodAutoscaler(in *VerticalPodAutoscaler, out *core.VerticalPodAutoscaler, s conversion.Scope) error { + return autoConvert_v1alpha1_VerticalPodAutoscaler_To_core_VerticalPodAutoscaler(in, out, s) +} + +func autoConvert_core_VerticalPodAutoscaler_To_v1alpha1_VerticalPodAutoscaler(in *core.VerticalPodAutoscaler, out *VerticalPodAutoscaler, s conversion.Scope) error { + out.Enabled = in.Enabled + out.EvictAfterOOMThreshold = (*metav1.Duration)(unsafe.Pointer(in.EvictAfterOOMThreshold)) + out.EvictionRateBurst = (*int32)(unsafe.Pointer(in.EvictionRateBurst)) + out.EvictionRateLimit = (*float64)(unsafe.Pointer(in.EvictionRateLimit)) + out.EvictionTolerance = (*float64)(unsafe.Pointer(in.EvictionTolerance)) + out.RecommendationMarginFraction = (*float64)(unsafe.Pointer(in.RecommendationMarginFraction)) + out.UpdaterInterval = (*metav1.Duration)(unsafe.Pointer(in.UpdaterInterval)) + out.RecommenderInterval = (*metav1.Duration)(unsafe.Pointer(in.RecommenderInterval)) + return nil +} + +// Convert_core_VerticalPodAutoscaler_To_v1alpha1_VerticalPodAutoscaler is an autogenerated conversion function. +func Convert_core_VerticalPodAutoscaler_To_v1alpha1_VerticalPodAutoscaler(in *core.VerticalPodAutoscaler, out *VerticalPodAutoscaler, s conversion.Scope) error { + return autoConvert_core_VerticalPodAutoscaler_To_v1alpha1_VerticalPodAutoscaler(in, out, s) +} + +func autoConvert_v1alpha1_Volume_To_core_Volume(in *Volume, out *core.Volume, s conversion.Scope) error { + out.Name = (*string)(unsafe.Pointer(in.Name)) + out.Type = (*string)(unsafe.Pointer(in.Type)) + out.VolumeSize = in.VolumeSize + out.Encrypted = (*bool)(unsafe.Pointer(in.Encrypted)) + return nil +} + +// Convert_v1alpha1_Volume_To_core_Volume is an autogenerated conversion function. +func Convert_v1alpha1_Volume_To_core_Volume(in *Volume, out *core.Volume, s conversion.Scope) error { + return autoConvert_v1alpha1_Volume_To_core_Volume(in, out, s) +} + +func autoConvert_core_Volume_To_v1alpha1_Volume(in *core.Volume, out *Volume, s conversion.Scope) error { + out.Name = (*string)(unsafe.Pointer(in.Name)) + out.Type = (*string)(unsafe.Pointer(in.Type)) + out.VolumeSize = in.VolumeSize + out.Encrypted = (*bool)(unsafe.Pointer(in.Encrypted)) + return nil +} + +// Convert_core_Volume_To_v1alpha1_Volume is an autogenerated conversion function. 
+func Convert_core_Volume_To_v1alpha1_Volume(in *core.Volume, out *Volume, s conversion.Scope) error { + return autoConvert_core_Volume_To_v1alpha1_Volume(in, out, s) +} + +func autoConvert_v1alpha1_VolumeType_To_core_VolumeType(in *VolumeType, out *core.VolumeType, s conversion.Scope) error { + out.Class = in.Class + out.Name = in.Name + out.Usable = (*bool)(unsafe.Pointer(in.Usable)) + return nil +} + +// Convert_v1alpha1_VolumeType_To_core_VolumeType is an autogenerated conversion function. +func Convert_v1alpha1_VolumeType_To_core_VolumeType(in *VolumeType, out *core.VolumeType, s conversion.Scope) error { + return autoConvert_v1alpha1_VolumeType_To_core_VolumeType(in, out, s) +} + +func autoConvert_core_VolumeType_To_v1alpha1_VolumeType(in *core.VolumeType, out *VolumeType, s conversion.Scope) error { + out.Class = in.Class + out.Name = in.Name + out.Usable = (*bool)(unsafe.Pointer(in.Usable)) + return nil +} + +// Convert_core_VolumeType_To_v1alpha1_VolumeType is an autogenerated conversion function. +func Convert_core_VolumeType_To_v1alpha1_VolumeType(in *core.VolumeType, out *VolumeType, s conversion.Scope) error { + return autoConvert_core_VolumeType_To_v1alpha1_VolumeType(in, out, s) +} + +func autoConvert_v1alpha1_WatchCacheSizes_To_core_WatchCacheSizes(in *WatchCacheSizes, out *core.WatchCacheSizes, s conversion.Scope) error { + out.Default = (*int32)(unsafe.Pointer(in.Default)) + out.Resources = *(*[]core.ResourceWatchCacheSize)(unsafe.Pointer(&in.Resources)) + return nil +} + +// Convert_v1alpha1_WatchCacheSizes_To_core_WatchCacheSizes is an autogenerated conversion function. +func Convert_v1alpha1_WatchCacheSizes_To_core_WatchCacheSizes(in *WatchCacheSizes, out *core.WatchCacheSizes, s conversion.Scope) error { + return autoConvert_v1alpha1_WatchCacheSizes_To_core_WatchCacheSizes(in, out, s) +} + +func autoConvert_core_WatchCacheSizes_To_v1alpha1_WatchCacheSizes(in *core.WatchCacheSizes, out *WatchCacheSizes, s conversion.Scope) error { + out.Default = (*int32)(unsafe.Pointer(in.Default)) + out.Resources = *(*[]ResourceWatchCacheSize)(unsafe.Pointer(&in.Resources)) + return nil +} + +// Convert_core_WatchCacheSizes_To_v1alpha1_WatchCacheSizes is an autogenerated conversion function. 
+func Convert_core_WatchCacheSizes_To_v1alpha1_WatchCacheSizes(in *core.WatchCacheSizes, out *WatchCacheSizes, s conversion.Scope) error { + return autoConvert_core_WatchCacheSizes_To_v1alpha1_WatchCacheSizes(in, out, s) +} + +func autoConvert_v1alpha1_Worker_To_core_Worker(in *Worker, out *core.Worker, s conversion.Scope) error { + out.Annotations = *(*map[string]string)(unsafe.Pointer(&in.Annotations)) + out.CABundle = (*string)(unsafe.Pointer(in.CABundle)) + out.CRI = (*core.CRI)(unsafe.Pointer(in.CRI)) + out.Kubernetes = (*core.WorkerKubernetes)(unsafe.Pointer(in.Kubernetes)) + out.Labels = *(*map[string]string)(unsafe.Pointer(&in.Labels)) + out.Name = in.Name + if err := Convert_v1alpha1_Machine_To_core_Machine(&in.Machine, &out.Machine, s); err != nil { + return err + } + out.Maximum = in.Maximum + out.Minimum = in.Minimum + out.MaxSurge = (*intstr.IntOrString)(unsafe.Pointer(in.MaxSurge)) + out.MaxUnavailable = (*intstr.IntOrString)(unsafe.Pointer(in.MaxUnavailable)) + out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig)) + out.Taints = *(*[]v1.Taint)(unsafe.Pointer(&in.Taints)) + out.Volume = (*core.Volume)(unsafe.Pointer(in.Volume)) + out.DataVolumes = *(*[]core.DataVolume)(unsafe.Pointer(&in.DataVolumes)) + out.KubeletDataVolumeName = (*string)(unsafe.Pointer(in.KubeletDataVolumeName)) + out.Zones = *(*[]string)(unsafe.Pointer(&in.Zones)) + out.SystemComponents = (*core.WorkerSystemComponents)(unsafe.Pointer(in.SystemComponents)) + out.MachineControllerManagerSettings = (*core.MachineControllerManagerSettings)(unsafe.Pointer(in.MachineControllerManagerSettings)) + return nil +} + +// Convert_v1alpha1_Worker_To_core_Worker is an autogenerated conversion function. +func Convert_v1alpha1_Worker_To_core_Worker(in *Worker, out *core.Worker, s conversion.Scope) error { + return autoConvert_v1alpha1_Worker_To_core_Worker(in, out, s) +} + +func autoConvert_core_Worker_To_v1alpha1_Worker(in *core.Worker, out *Worker, s conversion.Scope) error { + out.Annotations = *(*map[string]string)(unsafe.Pointer(&in.Annotations)) + out.CABundle = (*string)(unsafe.Pointer(in.CABundle)) + out.CRI = (*CRI)(unsafe.Pointer(in.CRI)) + out.Kubernetes = (*WorkerKubernetes)(unsafe.Pointer(in.Kubernetes)) + out.Labels = *(*map[string]string)(unsafe.Pointer(&in.Labels)) + out.Name = in.Name + if err := Convert_core_Machine_To_v1alpha1_Machine(&in.Machine, &out.Machine, s); err != nil { + return err + } + out.Maximum = in.Maximum + out.Minimum = in.Minimum + out.MaxSurge = (*intstr.IntOrString)(unsafe.Pointer(in.MaxSurge)) + out.MaxUnavailable = (*intstr.IntOrString)(unsafe.Pointer(in.MaxUnavailable)) + out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig)) + out.SystemComponents = (*WorkerSystemComponents)(unsafe.Pointer(in.SystemComponents)) + out.Taints = *(*[]v1.Taint)(unsafe.Pointer(&in.Taints)) + out.Volume = (*Volume)(unsafe.Pointer(in.Volume)) + out.DataVolumes = *(*[]DataVolume)(unsafe.Pointer(&in.DataVolumes)) + out.KubeletDataVolumeName = (*string)(unsafe.Pointer(in.KubeletDataVolumeName)) + out.Zones = *(*[]string)(unsafe.Pointer(&in.Zones)) + out.MachineControllerManagerSettings = (*MachineControllerManagerSettings)(unsafe.Pointer(in.MachineControllerManagerSettings)) + return nil +} + +// Convert_core_Worker_To_v1alpha1_Worker is an autogenerated conversion function. 
+func Convert_core_Worker_To_v1alpha1_Worker(in *core.Worker, out *Worker, s conversion.Scope) error { + return autoConvert_core_Worker_To_v1alpha1_Worker(in, out, s) +} + +func autoConvert_v1alpha1_WorkerKubernetes_To_core_WorkerKubernetes(in *WorkerKubernetes, out *core.WorkerKubernetes, s conversion.Scope) error { + out.Kubelet = (*core.KubeletConfig)(unsafe.Pointer(in.Kubelet)) + return nil +} + +// Convert_v1alpha1_WorkerKubernetes_To_core_WorkerKubernetes is an autogenerated conversion function. +func Convert_v1alpha1_WorkerKubernetes_To_core_WorkerKubernetes(in *WorkerKubernetes, out *core.WorkerKubernetes, s conversion.Scope) error { + return autoConvert_v1alpha1_WorkerKubernetes_To_core_WorkerKubernetes(in, out, s) +} + +func autoConvert_core_WorkerKubernetes_To_v1alpha1_WorkerKubernetes(in *core.WorkerKubernetes, out *WorkerKubernetes, s conversion.Scope) error { + out.Kubelet = (*KubeletConfig)(unsafe.Pointer(in.Kubelet)) + return nil +} + +// Convert_core_WorkerKubernetes_To_v1alpha1_WorkerKubernetes is an autogenerated conversion function. +func Convert_core_WorkerKubernetes_To_v1alpha1_WorkerKubernetes(in *core.WorkerKubernetes, out *WorkerKubernetes, s conversion.Scope) error { + return autoConvert_core_WorkerKubernetes_To_v1alpha1_WorkerKubernetes(in, out, s) +} + +func autoConvert_v1alpha1_WorkerSystemComponents_To_core_WorkerSystemComponents(in *WorkerSystemComponents, out *core.WorkerSystemComponents, s conversion.Scope) error { + out.Allow = in.Allow + return nil +} + +// Convert_v1alpha1_WorkerSystemComponents_To_core_WorkerSystemComponents is an autogenerated conversion function. +func Convert_v1alpha1_WorkerSystemComponents_To_core_WorkerSystemComponents(in *WorkerSystemComponents, out *core.WorkerSystemComponents, s conversion.Scope) error { + return autoConvert_v1alpha1_WorkerSystemComponents_To_core_WorkerSystemComponents(in, out, s) +} + +func autoConvert_core_WorkerSystemComponents_To_v1alpha1_WorkerSystemComponents(in *core.WorkerSystemComponents, out *WorkerSystemComponents, s conversion.Scope) error { + out.Allow = in.Allow + return nil +} + +// Convert_core_WorkerSystemComponents_To_v1alpha1_WorkerSystemComponents is an autogenerated conversion function. +func Convert_core_WorkerSystemComponents_To_v1alpha1_WorkerSystemComponents(in *core.WorkerSystemComponents, out *WorkerSystemComponents, s conversion.Scope) error { + return autoConvert_core_WorkerSystemComponents_To_v1alpha1_WorkerSystemComponents(in, out, s) +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000..133b3765f --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,4109 @@ +// +build !ignore_autogenerated + +/* +Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1" + v1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + intstr "k8s.io/apimachinery/pkg/util/intstr" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Addon) DeepCopyInto(out *Addon) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Addon. +func (in *Addon) DeepCopy() *Addon { + if in == nil { + return nil + } + out := new(Addon) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Addons) DeepCopyInto(out *Addons) { + *out = *in + if in.KubernetesDashboard != nil { + in, out := &in.KubernetesDashboard, &out.KubernetesDashboard + *out = new(KubernetesDashboard) + (*in).DeepCopyInto(*out) + } + if in.NginxIngress != nil { + in, out := &in.NginxIngress, &out.NginxIngress + *out = new(NginxIngress) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Addons. +func (in *Addons) DeepCopy() *Addons { + if in == nil { + return nil + } + out := new(Addons) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdmissionPlugin) DeepCopyInto(out *AdmissionPlugin) { + *out = *in + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = new(runtime.RawExtension) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionPlugin. +func (in *AdmissionPlugin) DeepCopy() *AdmissionPlugin { + if in == nil { + return nil + } + out := new(AdmissionPlugin) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Alerting) DeepCopyInto(out *Alerting) { + *out = *in + if in.EmailReceivers != nil { + in, out := &in.EmailReceivers, &out.EmailReceivers + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Alerting. +func (in *Alerting) DeepCopy() *Alerting { + if in == nil { + return nil + } + out := new(Alerting) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuditConfig) DeepCopyInto(out *AuditConfig) { + *out = *in + if in.AuditPolicy != nil { + in, out := &in.AuditPolicy, &out.AuditPolicy + *out = new(AuditPolicy) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditConfig. +func (in *AuditConfig) DeepCopy() *AuditConfig { + if in == nil { + return nil + } + out := new(AuditConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AuditPolicy) DeepCopyInto(out *AuditPolicy) { + *out = *in + if in.ConfigMapRef != nil { + in, out := &in.ConfigMapRef, &out.ConfigMapRef + *out = new(v1.ObjectReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditPolicy. +func (in *AuditPolicy) DeepCopy() *AuditPolicy { + if in == nil { + return nil + } + out := new(AuditPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AvailabilityZone) DeepCopyInto(out *AvailabilityZone) { + *out = *in + if in.UnavailableMachineTypes != nil { + in, out := &in.UnavailableMachineTypes, &out.UnavailableMachineTypes + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.UnavailableVolumeTypes != nil { + in, out := &in.UnavailableVolumeTypes, &out.UnavailableVolumeTypes + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AvailabilityZone. +func (in *AvailabilityZone) DeepCopy() *AvailabilityZone { + if in == nil { + return nil + } + out := new(AvailabilityZone) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupBucket) DeepCopyInto(out *BackupBucket) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupBucket. +func (in *BackupBucket) DeepCopy() *BackupBucket { + if in == nil { + return nil + } + out := new(BackupBucket) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BackupBucket) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupBucketList) DeepCopyInto(out *BackupBucketList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]BackupBucket, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupBucketList. +func (in *BackupBucketList) DeepCopy() *BackupBucketList { + if in == nil { + return nil + } + out := new(BackupBucketList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BackupBucketList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupBucketProvider) DeepCopyInto(out *BackupBucketProvider) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupBucketProvider. 
+func (in *BackupBucketProvider) DeepCopy() *BackupBucketProvider { + if in == nil { + return nil + } + out := new(BackupBucketProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupBucketSpec) DeepCopyInto(out *BackupBucketSpec) { + *out = *in + out.Provider = in.Provider + if in.ProviderConfig != nil { + in, out := &in.ProviderConfig, &out.ProviderConfig + *out = new(runtime.RawExtension) + (*in).DeepCopyInto(*out) + } + out.SecretRef = in.SecretRef + if in.Seed != nil { + in, out := &in.Seed, &out.Seed + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupBucketSpec. +func (in *BackupBucketSpec) DeepCopy() *BackupBucketSpec { + if in == nil { + return nil + } + out := new(BackupBucketSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupBucketStatus) DeepCopyInto(out *BackupBucketStatus) { + *out = *in + if in.ProviderStatus != nil { + in, out := &in.ProviderStatus, &out.ProviderStatus + *out = new(runtime.RawExtension) + (*in).DeepCopyInto(*out) + } + if in.LastOperation != nil { + in, out := &in.LastOperation, &out.LastOperation + *out = new(LastOperation) + (*in).DeepCopyInto(*out) + } + if in.LastError != nil { + in, out := &in.LastError, &out.LastError + *out = new(LastError) + (*in).DeepCopyInto(*out) + } + if in.GeneratedSecretRef != nil { + in, out := &in.GeneratedSecretRef, &out.GeneratedSecretRef + *out = new(v1.SecretReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupBucketStatus. +func (in *BackupBucketStatus) DeepCopy() *BackupBucketStatus { + if in == nil { + return nil + } + out := new(BackupBucketStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupEntry) DeepCopyInto(out *BackupEntry) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupEntry. +func (in *BackupEntry) DeepCopy() *BackupEntry { + if in == nil { + return nil + } + out := new(BackupEntry) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BackupEntry) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupEntryList) DeepCopyInto(out *BackupEntryList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]BackupEntry, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupEntryList. 
+func (in *BackupEntryList) DeepCopy() *BackupEntryList { + if in == nil { + return nil + } + out := new(BackupEntryList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BackupEntryList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupEntrySpec) DeepCopyInto(out *BackupEntrySpec) { + *out = *in + if in.Seed != nil { + in, out := &in.Seed, &out.Seed + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupEntrySpec. +func (in *BackupEntrySpec) DeepCopy() *BackupEntrySpec { + if in == nil { + return nil + } + out := new(BackupEntrySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupEntryStatus) DeepCopyInto(out *BackupEntryStatus) { + *out = *in + if in.LastOperation != nil { + in, out := &in.LastOperation, &out.LastOperation + *out = new(LastOperation) + (*in).DeepCopyInto(*out) + } + if in.LastError != nil { + in, out := &in.LastError, &out.LastError + *out = new(LastError) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupEntryStatus. +func (in *BackupEntryStatus) DeepCopy() *BackupEntryStatus { + if in == nil { + return nil + } + out := new(BackupEntryStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CRI) DeepCopyInto(out *CRI) { + *out = *in + if in.ContainerRuntimes != nil { + in, out := &in.ContainerRuntimes, &out.ContainerRuntimes + *out = make([]ContainerRuntime, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CRI. +func (in *CRI) DeepCopy() *CRI { + if in == nil { + return nil + } + out := new(CRI) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudInfo) DeepCopyInto(out *CloudInfo) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudInfo. +func (in *CloudInfo) DeepCopy() *CloudInfo { + if in == nil { + return nil + } + out := new(CloudInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudProfile) DeepCopyInto(out *CloudProfile) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudProfile. +func (in *CloudProfile) DeepCopy() *CloudProfile { + if in == nil { + return nil + } + out := new(CloudProfile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *CloudProfile) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudProfileList) DeepCopyInto(out *CloudProfileList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CloudProfile, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudProfileList. +func (in *CloudProfileList) DeepCopy() *CloudProfileList { + if in == nil { + return nil + } + out := new(CloudProfileList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CloudProfileList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudProfileSpec) DeepCopyInto(out *CloudProfileSpec) { + *out = *in + if in.CABundle != nil { + in, out := &in.CABundle, &out.CABundle + *out = new(string) + **out = **in + } + in.Kubernetes.DeepCopyInto(&out.Kubernetes) + if in.MachineImages != nil { + in, out := &in.MachineImages, &out.MachineImages + *out = make([]MachineImage, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MachineTypes != nil { + in, out := &in.MachineTypes, &out.MachineTypes + *out = make([]MachineType, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ProviderConfig != nil { + in, out := &in.ProviderConfig, &out.ProviderConfig + *out = new(runtime.RawExtension) + (*in).DeepCopyInto(*out) + } + if in.Regions != nil { + in, out := &in.Regions, &out.Regions + *out = make([]Region, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SeedSelector != nil { + in, out := &in.SeedSelector, &out.SeedSelector + *out = new(SeedSelector) + (*in).DeepCopyInto(*out) + } + if in.VolumeTypes != nil { + in, out := &in.VolumeTypes, &out.VolumeTypes + *out = make([]VolumeType, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudProfileSpec. +func (in *CloudProfileSpec) DeepCopy() *CloudProfileSpec { + if in == nil { + return nil + } + out := new(CloudProfileSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterAutoscaler) DeepCopyInto(out *ClusterAutoscaler) { + *out = *in + if in.ScaleDownDelayAfterAdd != nil { + in, out := &in.ScaleDownDelayAfterAdd, &out.ScaleDownDelayAfterAdd + *out = new(metav1.Duration) + **out = **in + } + if in.ScaleDownDelayAfterDelete != nil { + in, out := &in.ScaleDownDelayAfterDelete, &out.ScaleDownDelayAfterDelete + *out = new(metav1.Duration) + **out = **in + } + if in.ScaleDownDelayAfterFailure != nil { + in, out := &in.ScaleDownDelayAfterFailure, &out.ScaleDownDelayAfterFailure + *out = new(metav1.Duration) + **out = **in + } + if in.ScaleDownUnneededTime != nil { + in, out := &in.ScaleDownUnneededTime, &out.ScaleDownUnneededTime + *out = new(metav1.Duration) + **out = **in + } + if in.ScaleDownUtilizationThreshold != nil { + in, out := &in.ScaleDownUtilizationThreshold, &out.ScaleDownUtilizationThreshold + *out = new(float64) + **out = **in + } + if in.ScanInterval != nil { + in, out := &in.ScanInterval, &out.ScanInterval + *out = new(metav1.Duration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterAutoscaler. +func (in *ClusterAutoscaler) DeepCopy() *ClusterAutoscaler { + if in == nil { + return nil + } + out := new(ClusterAutoscaler) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterInfo) DeepCopyInto(out *ClusterInfo) { + *out = *in + out.Cloud = in.Cloud + out.Kubernetes = in.Kubernetes + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterInfo. +func (in *ClusterInfo) DeepCopy() *ClusterInfo { + if in == nil { + return nil + } + out := new(ClusterInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Condition) DeepCopyInto(out *Condition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime) + if in.Codes != nil { + in, out := &in.Codes, &out.Codes + *out = make([]ErrorCode, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Condition. +func (in *Condition) DeepCopy() *Condition { + if in == nil { + return nil + } + out := new(Condition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerRuntime) DeepCopyInto(out *ContainerRuntime) { + *out = *in + if in.ProviderConfig != nil { + in, out := &in.ProviderConfig, &out.ProviderConfig + *out = new(runtime.RawExtension) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerRuntime. +func (in *ContainerRuntime) DeepCopy() *ContainerRuntime { + if in == nil { + return nil + } + out := new(ContainerRuntime) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ControllerDeployment) DeepCopyInto(out *ControllerDeployment) { + *out = *in + if in.ProviderConfig != nil { + in, out := &in.ProviderConfig, &out.ProviderConfig + *out = new(runtime.RawExtension) + (*in).DeepCopyInto(*out) + } + if in.Policy != nil { + in, out := &in.Policy, &out.Policy + *out = new(ControllerDeploymentPolicy) + **out = **in + } + if in.SeedSelector != nil { + in, out := &in.SeedSelector, &out.SeedSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerDeployment. +func (in *ControllerDeployment) DeepCopy() *ControllerDeployment { + if in == nil { + return nil + } + out := new(ControllerDeployment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerInstallation) DeepCopyInto(out *ControllerInstallation) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerInstallation. +func (in *ControllerInstallation) DeepCopy() *ControllerInstallation { + if in == nil { + return nil + } + out := new(ControllerInstallation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ControllerInstallation) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerInstallationList) DeepCopyInto(out *ControllerInstallationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ControllerInstallation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerInstallationList. +func (in *ControllerInstallationList) DeepCopy() *ControllerInstallationList { + if in == nil { + return nil + } + out := new(ControllerInstallationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ControllerInstallationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerInstallationSpec) DeepCopyInto(out *ControllerInstallationSpec) { + *out = *in + out.RegistrationRef = in.RegistrationRef + out.SeedRef = in.SeedRef + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerInstallationSpec. +func (in *ControllerInstallationSpec) DeepCopy() *ControllerInstallationSpec { + if in == nil { + return nil + } + out := new(ControllerInstallationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ControllerInstallationStatus) DeepCopyInto(out *ControllerInstallationStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ProviderStatus != nil { + in, out := &in.ProviderStatus, &out.ProviderStatus + *out = new(runtime.RawExtension) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerInstallationStatus. +func (in *ControllerInstallationStatus) DeepCopy() *ControllerInstallationStatus { + if in == nil { + return nil + } + out := new(ControllerInstallationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerRegistration) DeepCopyInto(out *ControllerRegistration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerRegistration. +func (in *ControllerRegistration) DeepCopy() *ControllerRegistration { + if in == nil { + return nil + } + out := new(ControllerRegistration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ControllerRegistration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerRegistrationList) DeepCopyInto(out *ControllerRegistrationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ControllerRegistration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerRegistrationList. +func (in *ControllerRegistrationList) DeepCopy() *ControllerRegistrationList { + if in == nil { + return nil + } + out := new(ControllerRegistrationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ControllerRegistrationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerRegistrationSpec) DeepCopyInto(out *ControllerRegistrationSpec) { + *out = *in + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]ControllerResource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Deployment != nil { + in, out := &in.Deployment, &out.Deployment + *out = new(ControllerDeployment) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerRegistrationSpec. 
+func (in *ControllerRegistrationSpec) DeepCopy() *ControllerRegistrationSpec { + if in == nil { + return nil + } + out := new(ControllerRegistrationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerResource) DeepCopyInto(out *ControllerResource) { + *out = *in + if in.GloballyEnabled != nil { + in, out := &in.GloballyEnabled, &out.GloballyEnabled + *out = new(bool) + **out = **in + } + if in.ReconcileTimeout != nil { + in, out := &in.ReconcileTimeout, &out.ReconcileTimeout + *out = new(metav1.Duration) + **out = **in + } + if in.Primary != nil { + in, out := &in.Primary, &out.Primary + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerResource. +func (in *ControllerResource) DeepCopy() *ControllerResource { + if in == nil { + return nil + } + out := new(ControllerResource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNS) DeepCopyInto(out *DNS) { + *out = *in + if in.Domain != nil { + in, out := &in.Domain, &out.Domain + *out = new(string) + **out = **in + } + if in.Providers != nil { + in, out := &in.Providers, &out.Providers + *out = make([]DNSProvider, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNS. +func (in *DNS) DeepCopy() *DNS { + if in == nil { + return nil + } + out := new(DNS) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSIncludeExclude) DeepCopyInto(out *DNSIncludeExclude) { + *out = *in + if in.Include != nil { + in, out := &in.Include, &out.Include + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Exclude != nil { + in, out := &in.Exclude, &out.Exclude + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSIncludeExclude. +func (in *DNSIncludeExclude) DeepCopy() *DNSIncludeExclude { + if in == nil { + return nil + } + out := new(DNSIncludeExclude) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSProvider) DeepCopyInto(out *DNSProvider) { + *out = *in + if in.Domains != nil { + in, out := &in.Domains, &out.Domains + *out = new(DNSIncludeExclude) + (*in).DeepCopyInto(*out) + } + if in.Primary != nil { + in, out := &in.Primary, &out.Primary + *out = new(bool) + **out = **in + } + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = new(DNSIncludeExclude) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSProvider. 
+func (in *DNSProvider) DeepCopy() *DNSProvider { + if in == nil { + return nil + } + out := new(DNSProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataVolume) DeepCopyInto(out *DataVolume) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataVolume. +func (in *DataVolume) DeepCopy() *DataVolume { + if in == nil { + return nil + } + out := new(DataVolume) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Endpoint) DeepCopyInto(out *Endpoint) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Endpoint. +func (in *Endpoint) DeepCopy() *Endpoint { + if in == nil { + return nil + } + out := new(Endpoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExpirableVersion) DeepCopyInto(out *ExpirableVersion) { + *out = *in + if in.ExpirationDate != nil { + in, out := &in.ExpirationDate, &out.ExpirationDate + *out = (*in).DeepCopy() + } + if in.Classification != nil { + in, out := &in.Classification, &out.Classification + *out = new(VersionClassification) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExpirableVersion. +func (in *ExpirableVersion) DeepCopy() *ExpirableVersion { + if in == nil { + return nil + } + out := new(ExpirableVersion) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Extension) DeepCopyInto(out *Extension) { + *out = *in + if in.ProviderConfig != nil { + in, out := &in.ProviderConfig, &out.ProviderConfig + *out = new(runtime.RawExtension) + (*in).DeepCopyInto(*out) + } + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Extension. +func (in *Extension) DeepCopy() *Extension { + if in == nil { + return nil + } + out := new(Extension) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExtensionResourceState) DeepCopyInto(out *ExtensionResourceState) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Purpose != nil { + in, out := &in.Purpose, &out.Purpose + *out = new(string) + **out = **in + } + if in.State != nil { + in, out := &in.State, &out.State + *out = new(runtime.RawExtension) + (*in).DeepCopyInto(*out) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]v1beta1.NamedResourceReference, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtensionResourceState. 
+func (in *ExtensionResourceState) DeepCopy() *ExtensionResourceState { + if in == nil { + return nil + } + out := new(ExtensionResourceState) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Gardener) DeepCopyInto(out *Gardener) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Gardener. +func (in *Gardener) DeepCopy() *Gardener { + if in == nil { + return nil + } + out := new(Gardener) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GardenerResourceData) DeepCopyInto(out *GardenerResourceData) { + *out = *in + in.Data.DeepCopyInto(&out.Data) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GardenerResourceData. +func (in *GardenerResourceData) DeepCopy() *GardenerResourceData { + if in == nil { + return nil + } + out := new(GardenerResourceData) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Hibernation) DeepCopyInto(out *Hibernation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Schedules != nil { + in, out := &in.Schedules, &out.Schedules + *out = make([]HibernationSchedule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Hibernation. +func (in *Hibernation) DeepCopy() *Hibernation { + if in == nil { + return nil + } + out := new(Hibernation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HibernationSchedule) DeepCopyInto(out *HibernationSchedule) { + *out = *in + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(string) + **out = **in + } + if in.End != nil { + in, out := &in.End, &out.End + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HibernationSchedule. +func (in *HibernationSchedule) DeepCopy() *HibernationSchedule { + if in == nil { + return nil + } + out := new(HibernationSchedule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HorizontalPodAutoscalerConfig) DeepCopyInto(out *HorizontalPodAutoscalerConfig) { + *out = *in + if in.CPUInitializationPeriod != nil { + in, out := &in.CPUInitializationPeriod, &out.CPUInitializationPeriod + *out = new(metav1.Duration) + **out = **in + } + if in.DownscaleDelay != nil { + in, out := &in.DownscaleDelay, &out.DownscaleDelay + *out = new(metav1.Duration) + **out = **in + } + if in.DownscaleStabilization != nil { + in, out := &in.DownscaleStabilization, &out.DownscaleStabilization + *out = new(metav1.Duration) + **out = **in + } + if in.InitialReadinessDelay != nil { + in, out := &in.InitialReadinessDelay, &out.InitialReadinessDelay + *out = new(metav1.Duration) + **out = **in + } + if in.SyncPeriod != nil { + in, out := &in.SyncPeriod, &out.SyncPeriod + *out = new(metav1.Duration) + **out = **in + } + if in.Tolerance != nil { + in, out := &in.Tolerance, &out.Tolerance + *out = new(float64) + **out = **in + } + if in.UpscaleDelay != nil { + in, out := &in.UpscaleDelay, &out.UpscaleDelay + *out = new(metav1.Duration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerConfig. +func (in *HorizontalPodAutoscalerConfig) DeepCopy() *HorizontalPodAutoscalerConfig { + if in == nil { + return nil + } + out := new(HorizontalPodAutoscalerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Ingress) DeepCopyInto(out *Ingress) { + *out = *in + in.Controller.DeepCopyInto(&out.Controller) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Ingress. +func (in *Ingress) DeepCopy() *Ingress { + if in == nil { + return nil + } + out := new(Ingress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressController) DeepCopyInto(out *IngressController) { + *out = *in + if in.ProviderConfig != nil { + in, out := &in.ProviderConfig, &out.ProviderConfig + *out = new(runtime.RawExtension) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressController. +func (in *IngressController) DeepCopy() *IngressController { + if in == nil { + return nil + } + out := new(IngressController) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KubeAPIServerConfig) DeepCopyInto(out *KubeAPIServerConfig) { + *out = *in + in.KubernetesConfig.DeepCopyInto(&out.KubernetesConfig) + if in.AdmissionPlugins != nil { + in, out := &in.AdmissionPlugins, &out.AdmissionPlugins + *out = make([]AdmissionPlugin, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.APIAudiences != nil { + in, out := &in.APIAudiences, &out.APIAudiences + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.AuditConfig != nil { + in, out := &in.AuditConfig, &out.AuditConfig + *out = new(AuditConfig) + (*in).DeepCopyInto(*out) + } + if in.EnableBasicAuthentication != nil { + in, out := &in.EnableBasicAuthentication, &out.EnableBasicAuthentication + *out = new(bool) + **out = **in + } + if in.OIDCConfig != nil { + in, out := &in.OIDCConfig, &out.OIDCConfig + *out = new(OIDCConfig) + (*in).DeepCopyInto(*out) + } + if in.RuntimeConfig != nil { + in, out := &in.RuntimeConfig, &out.RuntimeConfig + *out = make(map[string]bool, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ServiceAccountConfig != nil { + in, out := &in.ServiceAccountConfig, &out.ServiceAccountConfig + *out = new(ServiceAccountConfig) + (*in).DeepCopyInto(*out) + } + if in.WatchCacheSizes != nil { + in, out := &in.WatchCacheSizes, &out.WatchCacheSizes + *out = new(WatchCacheSizes) + (*in).DeepCopyInto(*out) + } + if in.Requests != nil { + in, out := &in.Requests, &out.Requests + *out = new(KubeAPIServerRequests) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeAPIServerConfig. +func (in *KubeAPIServerConfig) DeepCopy() *KubeAPIServerConfig { + if in == nil { + return nil + } + out := new(KubeAPIServerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeAPIServerRequests) DeepCopyInto(out *KubeAPIServerRequests) { + *out = *in + if in.MaxNonMutatingInflight != nil { + in, out := &in.MaxNonMutatingInflight, &out.MaxNonMutatingInflight + *out = new(int32) + **out = **in + } + if in.MaxMutatingInflight != nil { + in, out := &in.MaxMutatingInflight, &out.MaxMutatingInflight + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeAPIServerRequests. +func (in *KubeAPIServerRequests) DeepCopy() *KubeAPIServerRequests { + if in == nil { + return nil + } + out := new(KubeAPIServerRequests) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeControllerManagerConfig) DeepCopyInto(out *KubeControllerManagerConfig) { + *out = *in + in.KubernetesConfig.DeepCopyInto(&out.KubernetesConfig) + if in.HorizontalPodAutoscalerConfig != nil { + in, out := &in.HorizontalPodAutoscalerConfig, &out.HorizontalPodAutoscalerConfig + *out = new(HorizontalPodAutoscalerConfig) + (*in).DeepCopyInto(*out) + } + if in.NodeCIDRMaskSize != nil { + in, out := &in.NodeCIDRMaskSize, &out.NodeCIDRMaskSize + *out = new(int32) + **out = **in + } + if in.PodEvictionTimeout != nil { + in, out := &in.PodEvictionTimeout, &out.PodEvictionTimeout + *out = new(metav1.Duration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeControllerManagerConfig. 
+func (in *KubeControllerManagerConfig) DeepCopy() *KubeControllerManagerConfig { + if in == nil { + return nil + } + out := new(KubeControllerManagerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeProxyConfig) DeepCopyInto(out *KubeProxyConfig) { + *out = *in + in.KubernetesConfig.DeepCopyInto(&out.KubernetesConfig) + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(ProxyMode) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyConfig. +func (in *KubeProxyConfig) DeepCopy() *KubeProxyConfig { + if in == nil { + return nil + } + out := new(KubeProxyConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeSchedulerConfig) DeepCopyInto(out *KubeSchedulerConfig) { + *out = *in + in.KubernetesConfig.DeepCopyInto(&out.KubernetesConfig) + if in.KubeMaxPDVols != nil { + in, out := &in.KubeMaxPDVols, &out.KubeMaxPDVols + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeSchedulerConfig. +func (in *KubeSchedulerConfig) DeepCopy() *KubeSchedulerConfig { + if in == nil { + return nil + } + out := new(KubeSchedulerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeletConfig) DeepCopyInto(out *KubeletConfig) { + *out = *in + in.KubernetesConfig.DeepCopyInto(&out.KubernetesConfig) + if in.CPUCFSQuota != nil { + in, out := &in.CPUCFSQuota, &out.CPUCFSQuota + *out = new(bool) + **out = **in + } + if in.CPUManagerPolicy != nil { + in, out := &in.CPUManagerPolicy, &out.CPUManagerPolicy + *out = new(string) + **out = **in + } + if in.EvictionHard != nil { + in, out := &in.EvictionHard, &out.EvictionHard + *out = new(KubeletConfigEviction) + (*in).DeepCopyInto(*out) + } + if in.EvictionMaxPodGracePeriod != nil { + in, out := &in.EvictionMaxPodGracePeriod, &out.EvictionMaxPodGracePeriod + *out = new(int32) + **out = **in + } + if in.EvictionMinimumReclaim != nil { + in, out := &in.EvictionMinimumReclaim, &out.EvictionMinimumReclaim + *out = new(KubeletConfigEvictionMinimumReclaim) + (*in).DeepCopyInto(*out) + } + if in.EvictionPressureTransitionPeriod != nil { + in, out := &in.EvictionPressureTransitionPeriod, &out.EvictionPressureTransitionPeriod + *out = new(metav1.Duration) + **out = **in + } + if in.EvictionSoft != nil { + in, out := &in.EvictionSoft, &out.EvictionSoft + *out = new(KubeletConfigEviction) + (*in).DeepCopyInto(*out) + } + if in.EvictionSoftGracePeriod != nil { + in, out := &in.EvictionSoftGracePeriod, &out.EvictionSoftGracePeriod + *out = new(KubeletConfigEvictionSoftGracePeriod) + (*in).DeepCopyInto(*out) + } + if in.MaxPods != nil { + in, out := &in.MaxPods, &out.MaxPods + *out = new(int32) + **out = **in + } + if in.PodPIDsLimit != nil { + in, out := &in.PodPIDsLimit, &out.PodPIDsLimit + *out = new(int64) + **out = **in + } + if in.ImagePullProgressDeadline != nil { + in, out := &in.ImagePullProgressDeadline, &out.ImagePullProgressDeadline + *out = new(metav1.Duration) + **out = **in + } + if in.FailSwapOn != nil { + in, out := &in.FailSwapOn, &out.FailSwapOn + *out = new(bool) + **out = **in + } + if in.KubeReserved != nil {
+ in, out := &in.KubeReserved, &out.KubeReserved + *out = new(KubeletConfigReserved) + (*in).DeepCopyInto(*out) + } + if in.SystemReserved != nil { + in, out := &in.SystemReserved, &out.SystemReserved + *out = new(KubeletConfigReserved) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletConfig. +func (in *KubeletConfig) DeepCopy() *KubeletConfig { + if in == nil { + return nil + } + out := new(KubeletConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeletConfigEviction) DeepCopyInto(out *KubeletConfigEviction) { + *out = *in + if in.MemoryAvailable != nil { + in, out := &in.MemoryAvailable, &out.MemoryAvailable + *out = new(string) + **out = **in + } + if in.ImageFSAvailable != nil { + in, out := &in.ImageFSAvailable, &out.ImageFSAvailable + *out = new(string) + **out = **in + } + if in.ImageFSInodesFree != nil { + in, out := &in.ImageFSInodesFree, &out.ImageFSInodesFree + *out = new(string) + **out = **in + } + if in.NodeFSAvailable != nil { + in, out := &in.NodeFSAvailable, &out.NodeFSAvailable + *out = new(string) + **out = **in + } + if in.NodeFSInodesFree != nil { + in, out := &in.NodeFSInodesFree, &out.NodeFSInodesFree + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletConfigEviction. +func (in *KubeletConfigEviction) DeepCopy() *KubeletConfigEviction { + if in == nil { + return nil + } + out := new(KubeletConfigEviction) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeletConfigEvictionMinimumReclaim) DeepCopyInto(out *KubeletConfigEvictionMinimumReclaim) { + *out = *in + if in.MemoryAvailable != nil { + in, out := &in.MemoryAvailable, &out.MemoryAvailable + x := (*in).DeepCopy() + *out = &x + } + if in.ImageFSAvailable != nil { + in, out := &in.ImageFSAvailable, &out.ImageFSAvailable + x := (*in).DeepCopy() + *out = &x + } + if in.ImageFSInodesFree != nil { + in, out := &in.ImageFSInodesFree, &out.ImageFSInodesFree + x := (*in).DeepCopy() + *out = &x + } + if in.NodeFSAvailable != nil { + in, out := &in.NodeFSAvailable, &out.NodeFSAvailable + x := (*in).DeepCopy() + *out = &x + } + if in.NodeFSInodesFree != nil { + in, out := &in.NodeFSInodesFree, &out.NodeFSInodesFree + x := (*in).DeepCopy() + *out = &x + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletConfigEvictionMinimumReclaim. +func (in *KubeletConfigEvictionMinimumReclaim) DeepCopy() *KubeletConfigEvictionMinimumReclaim { + if in == nil { + return nil + } + out := new(KubeletConfigEvictionMinimumReclaim) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KubeletConfigEvictionSoftGracePeriod) DeepCopyInto(out *KubeletConfigEvictionSoftGracePeriod) { + *out = *in + if in.MemoryAvailable != nil { + in, out := &in.MemoryAvailable, &out.MemoryAvailable + *out = new(metav1.Duration) + **out = **in + } + if in.ImageFSAvailable != nil { + in, out := &in.ImageFSAvailable, &out.ImageFSAvailable + *out = new(metav1.Duration) + **out = **in + } + if in.ImageFSInodesFree != nil { + in, out := &in.ImageFSInodesFree, &out.ImageFSInodesFree + *out = new(metav1.Duration) + **out = **in + } + if in.NodeFSAvailable != nil { + in, out := &in.NodeFSAvailable, &out.NodeFSAvailable + *out = new(metav1.Duration) + **out = **in + } + if in.NodeFSInodesFree != nil { + in, out := &in.NodeFSInodesFree, &out.NodeFSInodesFree + *out = new(metav1.Duration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletConfigEvictionSoftGracePeriod. +func (in *KubeletConfigEvictionSoftGracePeriod) DeepCopy() *KubeletConfigEvictionSoftGracePeriod { + if in == nil { + return nil + } + out := new(KubeletConfigEvictionSoftGracePeriod) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeletConfigReserved) DeepCopyInto(out *KubeletConfigReserved) { + *out = *in + if in.CPU != nil { + in, out := &in.CPU, &out.CPU + x := (*in).DeepCopy() + *out = &x + } + if in.Memory != nil { + in, out := &in.Memory, &out.Memory + x := (*in).DeepCopy() + *out = &x + } + if in.EphemeralStorage != nil { + in, out := &in.EphemeralStorage, &out.EphemeralStorage + x := (*in).DeepCopy() + *out = &x + } + if in.PID != nil { + in, out := &in.PID, &out.PID + x := (*in).DeepCopy() + *out = &x + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletConfigReserved. +func (in *KubeletConfigReserved) DeepCopy() *KubeletConfigReserved { + if in == nil { + return nil + } + out := new(KubeletConfigReserved) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Kubernetes) DeepCopyInto(out *Kubernetes) { + *out = *in + if in.AllowPrivilegedContainers != nil { + in, out := &in.AllowPrivilegedContainers, &out.AllowPrivilegedContainers + *out = new(bool) + **out = **in + } + if in.ClusterAutoscaler != nil { + in, out := &in.ClusterAutoscaler, &out.ClusterAutoscaler + *out = new(ClusterAutoscaler) + (*in).DeepCopyInto(*out) + } + if in.KubeAPIServer != nil { + in, out := &in.KubeAPIServer, &out.KubeAPIServer + *out = new(KubeAPIServerConfig) + (*in).DeepCopyInto(*out) + } + if in.KubeControllerManager != nil { + in, out := &in.KubeControllerManager, &out.KubeControllerManager + *out = new(KubeControllerManagerConfig) + (*in).DeepCopyInto(*out) + } + if in.KubeScheduler != nil { + in, out := &in.KubeScheduler, &out.KubeScheduler + *out = new(KubeSchedulerConfig) + (*in).DeepCopyInto(*out) + } + if in.KubeProxy != nil { + in, out := &in.KubeProxy, &out.KubeProxy + *out = new(KubeProxyConfig) + (*in).DeepCopyInto(*out) + } + if in.Kubelet != nil { + in, out := &in.Kubelet, &out.Kubelet + *out = new(KubeletConfig) + (*in).DeepCopyInto(*out) + } + if in.VerticalPodAutoscaler != nil { + in, out := &in.VerticalPodAutoscaler, &out.VerticalPodAutoscaler + *out = new(VerticalPodAutoscaler) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Kubernetes. +func (in *Kubernetes) DeepCopy() *Kubernetes { + if in == nil { + return nil + } + out := new(Kubernetes) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubernetesConfig) DeepCopyInto(out *KubernetesConfig) { + *out = *in + if in.FeatureGates != nil { + in, out := &in.FeatureGates, &out.FeatureGates + *out = make(map[string]bool, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesConfig. +func (in *KubernetesConfig) DeepCopy() *KubernetesConfig { + if in == nil { + return nil + } + out := new(KubernetesConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubernetesDashboard) DeepCopyInto(out *KubernetesDashboard) { + *out = *in + out.Addon = in.Addon + if in.AuthenticationMode != nil { + in, out := &in.AuthenticationMode, &out.AuthenticationMode + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesDashboard. +func (in *KubernetesDashboard) DeepCopy() *KubernetesDashboard { + if in == nil { + return nil + } + out := new(KubernetesDashboard) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubernetesInfo) DeepCopyInto(out *KubernetesInfo) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesInfo. +func (in *KubernetesInfo) DeepCopy() *KubernetesInfo { + if in == nil { + return nil + } + out := new(KubernetesInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KubernetesSettings) DeepCopyInto(out *KubernetesSettings) { + *out = *in + if in.Versions != nil { + in, out := &in.Versions, &out.Versions + *out = make([]ExpirableVersion, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesSettings. +func (in *KubernetesSettings) DeepCopy() *KubernetesSettings { + if in == nil { + return nil + } + out := new(KubernetesSettings) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LastError) DeepCopyInto(out *LastError) { + *out = *in + if in.TaskID != nil { + in, out := &in.TaskID, &out.TaskID + *out = new(string) + **out = **in + } + if in.Codes != nil { + in, out := &in.Codes, &out.Codes + *out = make([]ErrorCode, len(*in)) + copy(*out, *in) + } + if in.LastUpdateTime != nil { + in, out := &in.LastUpdateTime, &out.LastUpdateTime + *out = (*in).DeepCopy() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LastError. +func (in *LastError) DeepCopy() *LastError { + if in == nil { + return nil + } + out := new(LastError) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LastOperation) DeepCopyInto(out *LastOperation) { + *out = *in + in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LastOperation. +func (in *LastOperation) DeepCopy() *LastOperation { + if in == nil { + return nil + } + out := new(LastOperation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Machine) DeepCopyInto(out *Machine) { + *out = *in + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(ShootMachineImage) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Machine. +func (in *Machine) DeepCopy() *Machine { + if in == nil { + return nil + } + out := new(Machine) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineControllerManagerSettings) DeepCopyInto(out *MachineControllerManagerSettings) { + *out = *in + if in.MachineDrainTimeout != nil { + in, out := &in.MachineDrainTimeout, &out.MachineDrainTimeout + *out = new(metav1.Duration) + **out = **in + } + if in.MachineHealthTimeout != nil { + in, out := &in.MachineHealthTimeout, &out.MachineHealthTimeout + *out = new(metav1.Duration) + **out = **in + } + if in.MachineCreationTimeout != nil { + in, out := &in.MachineCreationTimeout, &out.MachineCreationTimeout + *out = new(metav1.Duration) + **out = **in + } + if in.MaxEvictRetries != nil { + in, out := &in.MaxEvictRetries, &out.MaxEvictRetries + *out = new(int32) + **out = **in + } + if in.NodeConditions != nil { + in, out := &in.NodeConditions, &out.NodeConditions + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineControllerManagerSettings. 
+func (in *MachineControllerManagerSettings) DeepCopy() *MachineControllerManagerSettings { + if in == nil { + return nil + } + out := new(MachineControllerManagerSettings) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineImage) DeepCopyInto(out *MachineImage) { + *out = *in + if in.Versions != nil { + in, out := &in.Versions, &out.Versions + *out = make([]MachineImageVersion, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineImage. +func (in *MachineImage) DeepCopy() *MachineImage { + if in == nil { + return nil + } + out := new(MachineImage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineImageVersion) DeepCopyInto(out *MachineImageVersion) { + *out = *in + in.ExpirableVersion.DeepCopyInto(&out.ExpirableVersion) + if in.CRI != nil { + in, out := &in.CRI, &out.CRI + *out = make([]CRI, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineImageVersion. +func (in *MachineImageVersion) DeepCopy() *MachineImageVersion { + if in == nil { + return nil + } + out := new(MachineImageVersion) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineType) DeepCopyInto(out *MachineType) { + *out = *in + out.CPU = in.CPU.DeepCopy() + out.GPU = in.GPU.DeepCopy() + out.Memory = in.Memory.DeepCopy() + if in.Storage != nil { + in, out := &in.Storage, &out.Storage + *out = new(MachineTypeStorage) + (*in).DeepCopyInto(*out) + } + if in.Usable != nil { + in, out := &in.Usable, &out.Usable + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineType. +func (in *MachineType) DeepCopy() *MachineType { + if in == nil { + return nil + } + out := new(MachineType) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineTypeStorage) DeepCopyInto(out *MachineTypeStorage) { + *out = *in + out.StorageSize = in.StorageSize.DeepCopy() + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineTypeStorage. +func (in *MachineTypeStorage) DeepCopy() *MachineTypeStorage { + if in == nil { + return nil + } + out := new(MachineTypeStorage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Maintenance) DeepCopyInto(out *Maintenance) { + *out = *in + if in.AutoUpdate != nil { + in, out := &in.AutoUpdate, &out.AutoUpdate + *out = new(MaintenanceAutoUpdate) + **out = **in + } + if in.TimeWindow != nil { + in, out := &in.TimeWindow, &out.TimeWindow + *out = new(MaintenanceTimeWindow) + **out = **in + } + if in.ConfineSpecUpdateRollout != nil { + in, out := &in.ConfineSpecUpdateRollout, &out.ConfineSpecUpdateRollout + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Maintenance. +func (in *Maintenance) DeepCopy() *Maintenance { + if in == nil { + return nil + } + out := new(Maintenance) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MaintenanceAutoUpdate) DeepCopyInto(out *MaintenanceAutoUpdate) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceAutoUpdate. +func (in *MaintenanceAutoUpdate) DeepCopy() *MaintenanceAutoUpdate { + if in == nil { + return nil + } + out := new(MaintenanceAutoUpdate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MaintenanceTimeWindow) DeepCopyInto(out *MaintenanceTimeWindow) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceTimeWindow. +func (in *MaintenanceTimeWindow) DeepCopy() *MaintenanceTimeWindow { + if in == nil { + return nil + } + out := new(MaintenanceTimeWindow) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Monitoring) DeepCopyInto(out *Monitoring) { + *out = *in + if in.Alerting != nil { + in, out := &in.Alerting, &out.Alerting + *out = new(Alerting) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Monitoring. +func (in *Monitoring) DeepCopy() *Monitoring { + if in == nil { + return nil + } + out := new(Monitoring) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamedResourceReference) DeepCopyInto(out *NamedResourceReference) { + *out = *in + out.ResourceRef = in.ResourceRef + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedResourceReference. +func (in *NamedResourceReference) DeepCopy() *NamedResourceReference { + if in == nil { + return nil + } + out := new(NamedResourceReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Networking) DeepCopyInto(out *Networking) { + *out = *in + if in.ProviderConfig != nil { + in, out := &in.ProviderConfig, &out.ProviderConfig + *out = new(runtime.RawExtension) + (*in).DeepCopyInto(*out) + } + if in.Pods != nil { + in, out := &in.Pods, &out.Pods + *out = new(string) + **out = **in + } + if in.Nodes != nil { + in, out := &in.Nodes, &out.Nodes + *out = new(string) + **out = **in + } + if in.Services != nil { + in, out := &in.Services, &out.Services + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Networking. +func (in *Networking) DeepCopy() *Networking { + if in == nil { + return nil + } + out := new(Networking) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NginxIngress) DeepCopyInto(out *NginxIngress) { + *out = *in + out.Addon = in.Addon + if in.LoadBalancerSourceRanges != nil { + in, out := &in.LoadBalancerSourceRanges, &out.LoadBalancerSourceRanges + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ExternalTrafficPolicy != nil { + in, out := &in.ExternalTrafficPolicy, &out.ExternalTrafficPolicy + *out = new(v1.ServiceExternalTrafficPolicyType) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NginxIngress. +func (in *NginxIngress) DeepCopy() *NginxIngress { + if in == nil { + return nil + } + out := new(NginxIngress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OIDCConfig) DeepCopyInto(out *OIDCConfig) { + *out = *in + if in.CABundle != nil { + in, out := &in.CABundle, &out.CABundle + *out = new(string) + **out = **in + } + if in.ClientAuthentication != nil { + in, out := &in.ClientAuthentication, &out.ClientAuthentication + *out = new(OpenIDConnectClientAuthentication) + (*in).DeepCopyInto(*out) + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.GroupsClaim != nil { + in, out := &in.GroupsClaim, &out.GroupsClaim + *out = new(string) + **out = **in + } + if in.GroupsPrefix != nil { + in, out := &in.GroupsPrefix, &out.GroupsPrefix + *out = new(string) + **out = **in + } + if in.IssuerURL != nil { + in, out := &in.IssuerURL, &out.IssuerURL + *out = new(string) + **out = **in + } + if in.RequiredClaims != nil { + in, out := &in.RequiredClaims, &out.RequiredClaims + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.SigningAlgs != nil { + in, out := &in.SigningAlgs, &out.SigningAlgs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.UsernameClaim != nil { + in, out := &in.UsernameClaim, &out.UsernameClaim + *out = new(string) + **out = **in + } + if in.UsernamePrefix != nil { + in, out := &in.UsernamePrefix, &out.UsernamePrefix + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OIDCConfig. 
+func (in *OIDCConfig) DeepCopy() *OIDCConfig { + if in == nil { + return nil + } + out := new(OIDCConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenIDConnectClientAuthentication) DeepCopyInto(out *OpenIDConnectClientAuthentication) { + *out = *in + if in.ExtraConfig != nil { + in, out := &in.ExtraConfig, &out.ExtraConfig + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenIDConnectClientAuthentication. +func (in *OpenIDConnectClientAuthentication) DeepCopy() *OpenIDConnectClientAuthentication { + if in == nil { + return nil + } + out := new(OpenIDConnectClientAuthentication) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Plant) DeepCopyInto(out *Plant) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Plant. +func (in *Plant) DeepCopy() *Plant { + if in == nil { + return nil + } + out := new(Plant) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Plant) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlantList) DeepCopyInto(out *PlantList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Plant, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlantList. +func (in *PlantList) DeepCopy() *PlantList { + if in == nil { + return nil + } + out := new(PlantList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PlantList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlantSpec) DeepCopyInto(out *PlantSpec) { + *out = *in + out.SecretRef = in.SecretRef + if in.Endpoints != nil { + in, out := &in.Endpoints, &out.Endpoints + *out = make([]Endpoint, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlantSpec. +func (in *PlantSpec) DeepCopy() *PlantSpec { + if in == nil { + return nil + } + out := new(PlantSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PlantStatus) DeepCopyInto(out *PlantStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ObservedGeneration != nil { + in, out := &in.ObservedGeneration, &out.ObservedGeneration + *out = new(int64) + **out = **in + } + if in.ClusterInfo != nil { + in, out := &in.ClusterInfo, &out.ClusterInfo + *out = new(ClusterInfo) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlantStatus. +func (in *PlantStatus) DeepCopy() *PlantStatus { + if in == nil { + return nil + } + out := new(PlantStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Project) DeepCopyInto(out *Project) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Project. +func (in *Project) DeepCopy() *Project { + if in == nil { + return nil + } + out := new(Project) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Project) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectList) DeepCopyInto(out *ProjectList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Project, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectList. +func (in *ProjectList) DeepCopy() *ProjectList { + if in == nil { + return nil + } + out := new(ProjectList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProjectList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectMember) DeepCopyInto(out *ProjectMember) { + *out = *in + out.Subject = in.Subject + if in.Roles != nil { + in, out := &in.Roles, &out.Roles + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectMember. +func (in *ProjectMember) DeepCopy() *ProjectMember { + if in == nil { + return nil + } + out := new(ProjectMember) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProjectSpec) DeepCopyInto(out *ProjectSpec) { + *out = *in + if in.CreatedBy != nil { + in, out := &in.CreatedBy, &out.CreatedBy + *out = new(rbacv1.Subject) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Owner != nil { + in, out := &in.Owner, &out.Owner + *out = new(rbacv1.Subject) + **out = **in + } + if in.Purpose != nil { + in, out := &in.Purpose, &out.Purpose + *out = new(string) + **out = **in + } + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]ProjectMember, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(string) + **out = **in + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = new(ProjectTolerations) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectSpec. +func (in *ProjectSpec) DeepCopy() *ProjectSpec { + if in == nil { + return nil + } + out := new(ProjectSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectStatus) DeepCopyInto(out *ProjectStatus) { + *out = *in + if in.StaleSinceTimestamp != nil { + in, out := &in.StaleSinceTimestamp, &out.StaleSinceTimestamp + *out = (*in).DeepCopy() + } + if in.StaleAutoDeleteTimestamp != nil { + in, out := &in.StaleAutoDeleteTimestamp, &out.StaleAutoDeleteTimestamp + *out = (*in).DeepCopy() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectStatus. +func (in *ProjectStatus) DeepCopy() *ProjectStatus { + if in == nil { + return nil + } + out := new(ProjectStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectTolerations) DeepCopyInto(out *ProjectTolerations) { + *out = *in + if in.Defaults != nil { + in, out := &in.Defaults, &out.Defaults + *out = make([]Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Whitelist != nil { + in, out := &in.Whitelist, &out.Whitelist + *out = make([]Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectTolerations. +func (in *ProjectTolerations) DeepCopy() *ProjectTolerations { + if in == nil { + return nil + } + out := new(ProjectTolerations) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Provider) DeepCopyInto(out *Provider) { + *out = *in + if in.ControlPlaneConfig != nil { + in, out := &in.ControlPlaneConfig, &out.ControlPlaneConfig + *out = new(runtime.RawExtension) + (*in).DeepCopyInto(*out) + } + if in.InfrastructureConfig != nil { + in, out := &in.InfrastructureConfig, &out.InfrastructureConfig + *out = new(runtime.RawExtension) + (*in).DeepCopyInto(*out) + } + if in.Workers != nil { + in, out := &in.Workers, &out.Workers + *out = make([]Worker, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Provider. +func (in *Provider) DeepCopy() *Provider { + if in == nil { + return nil + } + out := new(Provider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Quota) DeepCopyInto(out *Quota) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Quota. +func (in *Quota) DeepCopy() *Quota { + if in == nil { + return nil + } + out := new(Quota) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Quota) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QuotaList) DeepCopyInto(out *QuotaList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Quota, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuotaList. +func (in *QuotaList) DeepCopy() *QuotaList { + if in == nil { + return nil + } + out := new(QuotaList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *QuotaList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QuotaSpec) DeepCopyInto(out *QuotaSpec) { + *out = *in + if in.ClusterLifetimeDays != nil { + in, out := &in.ClusterLifetimeDays, &out.ClusterLifetimeDays + *out = new(int32) + **out = **in + } + if in.Metrics != nil { + in, out := &in.Metrics, &out.Metrics + *out = make(v1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + out.Scope = in.Scope + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuotaSpec. +func (in *QuotaSpec) DeepCopy() *QuotaSpec { + if in == nil { + return nil + } + out := new(QuotaSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Region) DeepCopyInto(out *Region) { + *out = *in + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = make([]AvailabilityZone, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Region. +func (in *Region) DeepCopy() *Region { + if in == nil { + return nil + } + out := new(Region) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceData) DeepCopyInto(out *ResourceData) { + *out = *in + out.CrossVersionObjectReference = in.CrossVersionObjectReference + in.Data.DeepCopyInto(&out.Data) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceData. +func (in *ResourceData) DeepCopy() *ResourceData { + if in == nil { + return nil + } + out := new(ResourceData) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceWatchCacheSize) DeepCopyInto(out *ResourceWatchCacheSize) { + *out = *in + if in.APIGroup != nil { + in, out := &in.APIGroup, &out.APIGroup + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceWatchCacheSize. +func (in *ResourceWatchCacheSize) DeepCopy() *ResourceWatchCacheSize { + if in == nil { + return nil + } + out := new(ResourceWatchCacheSize) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretBinding) DeepCopyInto(out *SecretBinding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.SecretRef = in.SecretRef + if in.Quotas != nil { + in, out := &in.Quotas, &out.Quotas + *out = make([]v1.ObjectReference, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretBinding. +func (in *SecretBinding) DeepCopy() *SecretBinding { + if in == nil { + return nil + } + out := new(SecretBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SecretBinding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretBindingList) DeepCopyInto(out *SecretBindingList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SecretBinding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretBindingList. 
+func (in *SecretBindingList) DeepCopy() *SecretBindingList { + if in == nil { + return nil + } + out := new(SecretBindingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SecretBindingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Seed) DeepCopyInto(out *Seed) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Seed. +func (in *Seed) DeepCopy() *Seed { + if in == nil { + return nil + } + out := new(Seed) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Seed) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SeedBackup) DeepCopyInto(out *SeedBackup) { + *out = *in + if in.ProviderConfig != nil { + in, out := &in.ProviderConfig, &out.ProviderConfig + *out = new(runtime.RawExtension) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + out.SecretRef = in.SecretRef + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedBackup. +func (in *SeedBackup) DeepCopy() *SeedBackup { + if in == nil { + return nil + } + out := new(SeedBackup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SeedDNS) DeepCopyInto(out *SeedDNS) { + *out = *in + if in.IngressDomain != nil { + in, out := &in.IngressDomain, &out.IngressDomain + *out = new(string) + **out = **in + } + if in.Provider != nil { + in, out := &in.Provider, &out.Provider + *out = new(SeedDNSProvider) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedDNS. +func (in *SeedDNS) DeepCopy() *SeedDNS { + if in == nil { + return nil + } + out := new(SeedDNS) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SeedDNSProvider) DeepCopyInto(out *SeedDNSProvider) { + *out = *in + out.SecretRef = in.SecretRef + if in.Domains != nil { + in, out := &in.Domains, &out.Domains + *out = new(DNSIncludeExclude) + (*in).DeepCopyInto(*out) + } + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = new(DNSIncludeExclude) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedDNSProvider. +func (in *SeedDNSProvider) DeepCopy() *SeedDNSProvider { + if in == nil { + return nil + } + out := new(SeedDNSProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SeedList) DeepCopyInto(out *SeedList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Seed, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedList. +func (in *SeedList) DeepCopy() *SeedList { + if in == nil { + return nil + } + out := new(SeedList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SeedList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SeedNetworks) DeepCopyInto(out *SeedNetworks) { + *out = *in + if in.Nodes != nil { + in, out := &in.Nodes, &out.Nodes + *out = new(string) + **out = **in + } + if in.ShootDefaults != nil { + in, out := &in.ShootDefaults, &out.ShootDefaults + *out = new(ShootNetworks) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedNetworks. +func (in *SeedNetworks) DeepCopy() *SeedNetworks { + if in == nil { + return nil + } + out := new(SeedNetworks) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SeedProvider) DeepCopyInto(out *SeedProvider) { + *out = *in + if in.ProviderConfig != nil { + in, out := &in.ProviderConfig, &out.ProviderConfig + *out = new(runtime.RawExtension) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedProvider. +func (in *SeedProvider) DeepCopy() *SeedProvider { + if in == nil { + return nil + } + out := new(SeedProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SeedSelector) DeepCopyInto(out *SeedSelector) { + *out = *in + if in.LabelSelector != nil { + in, out := &in.LabelSelector, &out.LabelSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.ProviderTypes != nil { + in, out := &in.ProviderTypes, &out.ProviderTypes + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedSelector. +func (in *SeedSelector) DeepCopy() *SeedSelector { + if in == nil { + return nil + } + out := new(SeedSelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SeedSettingExcessCapacityReservation) DeepCopyInto(out *SeedSettingExcessCapacityReservation) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedSettingExcessCapacityReservation. +func (in *SeedSettingExcessCapacityReservation) DeepCopy() *SeedSettingExcessCapacityReservation { + if in == nil { + return nil + } + out := new(SeedSettingExcessCapacityReservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SeedSettingLoadBalancerServices) DeepCopyInto(out *SeedSettingLoadBalancerServices) { + *out = *in + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedSettingLoadBalancerServices. +func (in *SeedSettingLoadBalancerServices) DeepCopy() *SeedSettingLoadBalancerServices { + if in == nil { + return nil + } + out := new(SeedSettingLoadBalancerServices) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SeedSettingScheduling) DeepCopyInto(out *SeedSettingScheduling) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedSettingScheduling. +func (in *SeedSettingScheduling) DeepCopy() *SeedSettingScheduling { + if in == nil { + return nil + } + out := new(SeedSettingScheduling) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SeedSettingShootDNS) DeepCopyInto(out *SeedSettingShootDNS) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedSettingShootDNS. +func (in *SeedSettingShootDNS) DeepCopy() *SeedSettingShootDNS { + if in == nil { + return nil + } + out := new(SeedSettingShootDNS) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SeedSettingVerticalPodAutoscaler) DeepCopyInto(out *SeedSettingVerticalPodAutoscaler) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedSettingVerticalPodAutoscaler. +func (in *SeedSettingVerticalPodAutoscaler) DeepCopy() *SeedSettingVerticalPodAutoscaler { + if in == nil { + return nil + } + out := new(SeedSettingVerticalPodAutoscaler) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SeedSettings) DeepCopyInto(out *SeedSettings) { + *out = *in + if in.ExcessCapacityReservation != nil { + in, out := &in.ExcessCapacityReservation, &out.ExcessCapacityReservation + *out = new(SeedSettingExcessCapacityReservation) + **out = **in + } + if in.Scheduling != nil { + in, out := &in.Scheduling, &out.Scheduling + *out = new(SeedSettingScheduling) + **out = **in + } + if in.ShootDNS != nil { + in, out := &in.ShootDNS, &out.ShootDNS + *out = new(SeedSettingShootDNS) + **out = **in + } + if in.LoadBalancerServices != nil { + in, out := &in.LoadBalancerServices, &out.LoadBalancerServices + *out = new(SeedSettingLoadBalancerServices) + (*in).DeepCopyInto(*out) + } + if in.VerticalPodAutoscaler != nil { + in, out := &in.VerticalPodAutoscaler, &out.VerticalPodAutoscaler + *out = new(SeedSettingVerticalPodAutoscaler) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedSettings. 
+func (in *SeedSettings) DeepCopy() *SeedSettings { + if in == nil { + return nil + } + out := new(SeedSettings) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SeedSpec) DeepCopyInto(out *SeedSpec) { + *out = *in + if in.Backup != nil { + in, out := &in.Backup, &out.Backup + *out = new(SeedBackup) + (*in).DeepCopyInto(*out) + } + if in.BlockCIDRs != nil { + in, out := &in.BlockCIDRs, &out.BlockCIDRs + *out = make([]string, len(*in)) + copy(*out, *in) + } + in.DNS.DeepCopyInto(&out.DNS) + in.Networks.DeepCopyInto(&out.Networks) + in.Provider.DeepCopyInto(&out.Provider) + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(v1.SecretReference) + **out = **in + } + if in.Taints != nil { + in, out := &in.Taints, &out.Taints + *out = make([]SeedTaint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Volume != nil { + in, out := &in.Volume, &out.Volume + *out = new(SeedVolume) + (*in).DeepCopyInto(*out) + } + if in.Settings != nil { + in, out := &in.Settings, &out.Settings + *out = new(SeedSettings) + (*in).DeepCopyInto(*out) + } + if in.Ingress != nil { + in, out := &in.Ingress, &out.Ingress + *out = new(Ingress) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedSpec. +func (in *SeedSpec) DeepCopy() *SeedSpec { + if in == nil { + return nil + } + out := new(SeedSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SeedStatus) DeepCopyInto(out *SeedStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Gardener != nil { + in, out := &in.Gardener, &out.Gardener + *out = new(Gardener) + **out = **in + } + if in.KubernetesVersion != nil { + in, out := &in.KubernetesVersion, &out.KubernetesVersion + *out = new(string) + **out = **in + } + if in.ClusterIdentity != nil { + in, out := &in.ClusterIdentity, &out.ClusterIdentity + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedStatus. +func (in *SeedStatus) DeepCopy() *SeedStatus { + if in == nil { + return nil + } + out := new(SeedStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SeedTaint) DeepCopyInto(out *SeedTaint) { + *out = *in + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedTaint. +func (in *SeedTaint) DeepCopy() *SeedTaint { + if in == nil { + return nil + } + out := new(SeedTaint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SeedVolume) DeepCopyInto(out *SeedVolume) { + *out = *in + if in.MinimumSize != nil { + in, out := &in.MinimumSize, &out.MinimumSize + x := (*in).DeepCopy() + *out = &x + } + if in.Providers != nil { + in, out := &in.Providers, &out.Providers + *out = make([]SeedVolumeProvider, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedVolume. +func (in *SeedVolume) DeepCopy() *SeedVolume { + if in == nil { + return nil + } + out := new(SeedVolume) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SeedVolumeProvider) DeepCopyInto(out *SeedVolumeProvider) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedVolumeProvider. +func (in *SeedVolumeProvider) DeepCopy() *SeedVolumeProvider { + if in == nil { + return nil + } + out := new(SeedVolumeProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountConfig) DeepCopyInto(out *ServiceAccountConfig) { + *out = *in + if in.Issuer != nil { + in, out := &in.Issuer, &out.Issuer + *out = new(string) + **out = **in + } + if in.SigningKeySecret != nil { + in, out := &in.SigningKeySecret, &out.SigningKeySecret + *out = new(v1.LocalObjectReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountConfig. +func (in *ServiceAccountConfig) DeepCopy() *ServiceAccountConfig { + if in == nil { + return nil + } + out := new(ServiceAccountConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Shoot) DeepCopyInto(out *Shoot) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Shoot. +func (in *Shoot) DeepCopy() *Shoot { + if in == nil { + return nil + } + out := new(Shoot) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Shoot) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ShootList) DeepCopyInto(out *ShootList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Shoot, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShootList. +func (in *ShootList) DeepCopy() *ShootList { + if in == nil { + return nil + } + out := new(ShootList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ShootList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ShootMachineImage) DeepCopyInto(out *ShootMachineImage) { + *out = *in + if in.ProviderConfig != nil { + in, out := &in.ProviderConfig, &out.ProviderConfig + *out = new(runtime.RawExtension) + (*in).DeepCopyInto(*out) + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShootMachineImage. +func (in *ShootMachineImage) DeepCopy() *ShootMachineImage { + if in == nil { + return nil + } + out := new(ShootMachineImage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ShootNetworks) DeepCopyInto(out *ShootNetworks) { + *out = *in + if in.Pods != nil { + in, out := &in.Pods, &out.Pods + *out = new(string) + **out = **in + } + if in.Services != nil { + in, out := &in.Services, &out.Services + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShootNetworks. +func (in *ShootNetworks) DeepCopy() *ShootNetworks { + if in == nil { + return nil + } + out := new(ShootNetworks) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ShootSpec) DeepCopyInto(out *ShootSpec) { + *out = *in + if in.Addons != nil { + in, out := &in.Addons, &out.Addons + *out = new(Addons) + (*in).DeepCopyInto(*out) + } + if in.DNS != nil { + in, out := &in.DNS, &out.DNS + *out = new(DNS) + (*in).DeepCopyInto(*out) + } + if in.Extensions != nil { + in, out := &in.Extensions, &out.Extensions + *out = make([]Extension, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Hibernation != nil { + in, out := &in.Hibernation, &out.Hibernation + *out = new(Hibernation) + (*in).DeepCopyInto(*out) + } + in.Kubernetes.DeepCopyInto(&out.Kubernetes) + in.Networking.DeepCopyInto(&out.Networking) + if in.Maintenance != nil { + in, out := &in.Maintenance, &out.Maintenance + *out = new(Maintenance) + (*in).DeepCopyInto(*out) + } + if in.Monitoring != nil { + in, out := &in.Monitoring, &out.Monitoring + *out = new(Monitoring) + (*in).DeepCopyInto(*out) + } + in.Provider.DeepCopyInto(&out.Provider) + if in.Purpose != nil { + in, out := &in.Purpose, &out.Purpose + *out = new(ShootPurpose) + **out = **in + } + if in.SeedName != nil { + in, out := &in.SeedName, &out.SeedName + *out = new(string) + **out = **in + } + if in.SeedSelector != nil { + in, out := &in.SeedSelector, &out.SeedSelector + *out = new(SeedSelector) + (*in).DeepCopyInto(*out) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]NamedResourceReference, len(*in)) + copy(*out, *in) + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShootSpec. 
+func (in *ShootSpec) DeepCopy() *ShootSpec { + if in == nil { + return nil + } + out := new(ShootSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ShootState) DeepCopyInto(out *ShootState) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShootState. +func (in *ShootState) DeepCopy() *ShootState { + if in == nil { + return nil + } + out := new(ShootState) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ShootState) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ShootStateList) DeepCopyInto(out *ShootStateList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ShootState, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShootStateList. +func (in *ShootStateList) DeepCopy() *ShootStateList { + if in == nil { + return nil + } + out := new(ShootStateList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ShootStateList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ShootStateSpec) DeepCopyInto(out *ShootStateSpec) { + *out = *in + if in.Gardener != nil { + in, out := &in.Gardener, &out.Gardener + *out = make([]GardenerResourceData, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Extensions != nil { + in, out := &in.Extensions, &out.Extensions + *out = make([]ExtensionResourceState, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]ResourceData, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShootStateSpec. +func (in *ShootStateSpec) DeepCopy() *ShootStateSpec { + if in == nil { + return nil + } + out := new(ShootStateSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ShootStatus) DeepCopyInto(out *ShootStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Constraints != nil { + in, out := &in.Constraints, &out.Constraints + *out = make([]Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + out.Gardener = in.Gardener + if in.LastOperation != nil { + in, out := &in.LastOperation, &out.LastOperation + *out = new(LastOperation) + (*in).DeepCopyInto(*out) + } + if in.LastError != nil { + in, out := &in.LastError, &out.LastError + *out = new(LastError) + (*in).DeepCopyInto(*out) + } + if in.LastErrors != nil { + in, out := &in.LastErrors, &out.LastErrors + *out = make([]LastError, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RetryCycleStartTime != nil { + in, out := &in.RetryCycleStartTime, &out.RetryCycleStartTime + *out = (*in).DeepCopy() + } + if in.Seed != nil { + in, out := &in.Seed, &out.Seed + *out = new(string) + **out = **in + } + if in.ClusterIdentity != nil { + in, out := &in.ClusterIdentity, &out.ClusterIdentity + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShootStatus. +func (in *ShootStatus) DeepCopy() *ShootStatus { + if in == nil { + return nil + } + out := new(ShootStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Toleration) DeepCopyInto(out *Toleration) { + *out = *in + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Toleration. +func (in *Toleration) DeepCopy() *Toleration { + if in == nil { + return nil + } + out := new(Toleration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VerticalPodAutoscaler) DeepCopyInto(out *VerticalPodAutoscaler) { + *out = *in + if in.EvictAfterOOMThreshold != nil { + in, out := &in.EvictAfterOOMThreshold, &out.EvictAfterOOMThreshold + *out = new(metav1.Duration) + **out = **in + } + if in.EvictionRateBurst != nil { + in, out := &in.EvictionRateBurst, &out.EvictionRateBurst + *out = new(int32) + **out = **in + } + if in.EvictionRateLimit != nil { + in, out := &in.EvictionRateLimit, &out.EvictionRateLimit + *out = new(float64) + **out = **in + } + if in.EvictionTolerance != nil { + in, out := &in.EvictionTolerance, &out.EvictionTolerance + *out = new(float64) + **out = **in + } + if in.RecommendationMarginFraction != nil { + in, out := &in.RecommendationMarginFraction, &out.RecommendationMarginFraction + *out = new(float64) + **out = **in + } + if in.UpdaterInterval != nil { + in, out := &in.UpdaterInterval, &out.UpdaterInterval + *out = new(metav1.Duration) + **out = **in + } + if in.RecommenderInterval != nil { + in, out := &in.RecommenderInterval, &out.RecommenderInterval + *out = new(metav1.Duration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VerticalPodAutoscaler. 
+func (in *VerticalPodAutoscaler) DeepCopy() *VerticalPodAutoscaler { + if in == nil { + return nil + } + out := new(VerticalPodAutoscaler) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Volume) DeepCopyInto(out *Volume) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Volume. +func (in *Volume) DeepCopy() *Volume { + if in == nil { + return nil + } + out := new(Volume) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeType) DeepCopyInto(out *VolumeType) { + *out = *in + if in.Usable != nil { + in, out := &in.Usable, &out.Usable + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeType. +func (in *VolumeType) DeepCopy() *VolumeType { + if in == nil { + return nil + } + out := new(VolumeType) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WatchCacheSizes) DeepCopyInto(out *WatchCacheSizes) { + *out = *in + if in.Default != nil { + in, out := &in.Default, &out.Default + *out = new(int32) + **out = **in + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]ResourceWatchCacheSize, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WatchCacheSizes. +func (in *WatchCacheSizes) DeepCopy() *WatchCacheSizes { + if in == nil { + return nil + } + out := new(WatchCacheSizes) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Worker) DeepCopyInto(out *Worker) { + *out = *in + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.CABundle != nil { + in, out := &in.CABundle, &out.CABundle + *out = new(string) + **out = **in + } + if in.CRI != nil { + in, out := &in.CRI, &out.CRI + *out = new(CRI) + (*in).DeepCopyInto(*out) + } + if in.Kubernetes != nil { + in, out := &in.Kubernetes, &out.Kubernetes + *out = new(WorkerKubernetes) + (*in).DeepCopyInto(*out) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + in.Machine.DeepCopyInto(&out.Machine) + if in.MaxSurge != nil { + in, out := &in.MaxSurge, &out.MaxSurge + *out = new(intstr.IntOrString) + **out = **in + } + if in.MaxUnavailable != nil { + in, out := &in.MaxUnavailable, &out.MaxUnavailable + *out = new(intstr.IntOrString) + **out = **in + } + if in.ProviderConfig != nil { + in, out := &in.ProviderConfig, &out.ProviderConfig + *out = new(runtime.RawExtension) + (*in).DeepCopyInto(*out) + } + if in.Taints != nil { + in, out := &in.Taints, &out.Taints + *out = make([]v1.Taint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Volume != nil { + in, out := &in.Volume, &out.Volume + *out = new(Volume) + (*in).DeepCopyInto(*out) + } + if in.DataVolumes != nil { + in, out := &in.DataVolumes, &out.DataVolumes + *out = make([]DataVolume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.KubeletDataVolumeName != nil { + in, out := &in.KubeletDataVolumeName, &out.KubeletDataVolumeName + *out = new(string) + **out = **in + } + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SystemComponents != nil { + in, out := &in.SystemComponents, &out.SystemComponents + *out = new(WorkerSystemComponents) + **out = **in + } + if in.MachineControllerManagerSettings != nil { + in, out := &in.MachineControllerManagerSettings, &out.MachineControllerManagerSettings + *out = new(MachineControllerManagerSettings) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Worker. +func (in *Worker) DeepCopy() *Worker { + if in == nil { + return nil + } + out := new(Worker) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkerKubernetes) DeepCopyInto(out *WorkerKubernetes) { + *out = *in + if in.Kubelet != nil { + in, out := &in.Kubelet, &out.Kubelet + *out = new(KubeletConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerKubernetes. +func (in *WorkerKubernetes) DeepCopy() *WorkerKubernetes { + if in == nil { + return nil + } + out := new(WorkerKubernetes) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkerSystemComponents) DeepCopyInto(out *WorkerSystemComponents) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerSystemComponents. 
+func (in *WorkerSystemComponents) DeepCopy() *WorkerSystemComponents { + if in == nil { + return nil + } + out := new(WorkerSystemComponents) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/zz_generated.defaults.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/zz_generated.defaults.go new file mode 100644 index 000000000..0c335385b --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/zz_generated.defaults.go @@ -0,0 +1,138 @@ +// +build !ignore_autogenerated + +/* +Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by defaulter-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + scheme.AddTypeDefaultingFunc(&CloudProfile{}, func(obj interface{}) { SetObjectDefaults_CloudProfile(obj.(*CloudProfile)) }) + scheme.AddTypeDefaultingFunc(&CloudProfileList{}, func(obj interface{}) { SetObjectDefaults_CloudProfileList(obj.(*CloudProfileList)) }) + scheme.AddTypeDefaultingFunc(&ControllerRegistration{}, func(obj interface{}) { SetObjectDefaults_ControllerRegistration(obj.(*ControllerRegistration)) }) + scheme.AddTypeDefaultingFunc(&ControllerRegistrationList{}, func(obj interface{}) { SetObjectDefaults_ControllerRegistrationList(obj.(*ControllerRegistrationList)) }) + scheme.AddTypeDefaultingFunc(&Project{}, func(obj interface{}) { SetObjectDefaults_Project(obj.(*Project)) }) + scheme.AddTypeDefaultingFunc(&ProjectList{}, func(obj interface{}) { SetObjectDefaults_ProjectList(obj.(*ProjectList)) }) + scheme.AddTypeDefaultingFunc(&SecretBinding{}, func(obj interface{}) { SetObjectDefaults_SecretBinding(obj.(*SecretBinding)) }) + scheme.AddTypeDefaultingFunc(&SecretBindingList{}, func(obj interface{}) { SetObjectDefaults_SecretBindingList(obj.(*SecretBindingList)) }) + scheme.AddTypeDefaultingFunc(&Seed{}, func(obj interface{}) { SetObjectDefaults_Seed(obj.(*Seed)) }) + scheme.AddTypeDefaultingFunc(&SeedList{}, func(obj interface{}) { SetObjectDefaults_SeedList(obj.(*SeedList)) }) + scheme.AddTypeDefaultingFunc(&Shoot{}, func(obj interface{}) { SetObjectDefaults_Shoot(obj.(*Shoot)) }) + scheme.AddTypeDefaultingFunc(&ShootList{}, func(obj interface{}) { SetObjectDefaults_ShootList(obj.(*ShootList)) }) + return nil +} + +func SetObjectDefaults_CloudProfile(in *CloudProfile) { + for i := range in.Spec.MachineTypes { + a := &in.Spec.MachineTypes[i] + SetDefaults_MachineType(a) + } + for i := range in.Spec.VolumeTypes { + a := &in.Spec.VolumeTypes[i] + SetDefaults_VolumeType(a) + } +} + +func SetObjectDefaults_CloudProfileList(in 
*CloudProfileList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_CloudProfile(a) + } +} + +func SetObjectDefaults_ControllerRegistration(in *ControllerRegistration) { + for i := range in.Spec.Resources { + a := &in.Spec.Resources[i] + SetDefaults_ControllerResource(a) + } + if in.Spec.Deployment != nil { + SetDefaults_ControllerDeployment(in.Spec.Deployment) + } +} + +func SetObjectDefaults_ControllerRegistrationList(in *ControllerRegistrationList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_ControllerRegistration(a) + } +} + +func SetObjectDefaults_Project(in *Project) { + SetDefaults_Project(in) +} + +func SetObjectDefaults_ProjectList(in *ProjectList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_Project(a) + } +} + +func SetObjectDefaults_SecretBinding(in *SecretBinding) { + SetDefaults_SecretBinding(in) +} + +func SetObjectDefaults_SecretBindingList(in *SecretBindingList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_SecretBinding(a) + } +} + +func SetObjectDefaults_Seed(in *Seed) { + SetDefaults_Seed(in) +} + +func SetObjectDefaults_SeedList(in *SeedList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_Seed(a) + } +} + +func SetObjectDefaults_Shoot(in *Shoot) { + SetDefaults_Shoot(in) + if in.Spec.Addons != nil { + if in.Spec.Addons.NginxIngress != nil { + SetDefaults_NginxIngress(in.Spec.Addons.NginxIngress) + } + } + if in.Spec.Kubernetes.VerticalPodAutoscaler != nil { + SetDefaults_VerticalPodAutoscaler(in.Spec.Kubernetes.VerticalPodAutoscaler) + } + if in.Spec.Maintenance != nil { + SetDefaults_Maintenance(in.Spec.Maintenance) + } + for i := range in.Spec.Provider.Workers { + a := &in.Spec.Provider.Workers[i] + SetDefaults_Worker(a) + } +} + +func SetObjectDefaults_ShootList(in *ShootList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_Shoot(a) + } +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/constants/types_constants.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/constants/types_constants.go index 7e00df8bb..dac01d544 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/constants/types_constants.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/constants/types_constants.go @@ -36,6 +36,9 @@ const ( // SecretNameSSHKeyPair is a constant for the name of a Kubernetes secret object that contains the SSH key pair // (public and private key) that can be used to SSH into the shoot nodes. SecretNameSSHKeyPair = "ssh-keypair" + // SecretNameServiceAccountKey is a constant for the name of a Kubernetes secret object that contains a + // PEM-encoded private RSA or ECDSA key used by the Kube Controller Manager to sign service account tokens + SecretNameServiceAccountKey = "service-account-key" // SecretNameGardener is a constant for the name of a Kubernetes secret object that contains the client // certificate and a kubeconfig for a shoot cluster. It is used by Gardener and can be used by extension @@ -74,41 +77,56 @@ const ( // DeploymentNameKubeStateMetricsShoot is a constant for the name of a Kubernetes deployment object that contains // the kube-state-metrics pod. DeploymentNameKubeStateMetricsShoot = "kube-state-metrics" - // DeploymentNameKubeStateMetricsSeed is a constant for the name of a Kubernetes deployment object that contains - // the kube-state-metrics-seed pod. 
- DeploymentNameKubeStateMetricsSeed = "kube-state-metrics-seed" - // DeploymentNameKibana is a constant for the name of a Kubernetes deployment object that contains - // the kibana-logging pod. - DeploymentNameKibana = "kibana-logging" + + // DeploymentNameVPAAdmissionController is a constant for the name of the VPA admission controller deployment. + DeploymentNameVPAAdmissionController = "vpa-admission-controller" + // DeploymentNameVPAExporter is a constant for the name of the VPA exporter deployment. + DeploymentNameVPAExporter = "vpa-exporter" + // DeploymentNameVPARecommender is a constant for the name of the VPA recommender deployment. + DeploymentNameVPARecommender = "vpa-recommender" + // DeploymentNameVPAUpdater is a constant for the name of the VPA updater deployment. + DeploymentNameVPAUpdater = "vpa-updater" // StatefulSetNameAlertManager is a constant for the name of a Kubernetes stateful set object that contains // the alertmanager pod. StatefulSetNameAlertManager = "alertmanager" + // ETCDRoleMain is a constant for the main etcd role. + ETCDRoleMain = "main" + // ETCDRoleEvents is a constant for the events etcd role. + ETCDRoleEvents = "events" // ETCDMain is a constant for the name of etcd-main Etcd object. - ETCDMain = "etcd-main" + ETCDMain = "etcd-" + ETCDRoleMain // ETCDEvents is a constant for the name of etcd-events Etcd object. - ETCDEvents = "etcd-events" - // StatefulSetNameElasticSearch is a constant for the name of a Kubernetes stateful set object that contains - // the elasticsearch-logging pod. - StatefulSetNameElasticSearch = "elasticsearch-logging" + ETCDEvents = "etcd-" + ETCDRoleEvents + // StatefulSetNameLoki is a constant for the name of a Kubernetes stateful set object that contains + // the loki pod. + StatefulSetNameLoki = "loki" // StatefulSetNamePrometheus is a constant for the name of a Kubernetes stateful set object that contains // the prometheus pod. StatefulSetNamePrometheus = "prometheus" // GardenerPurpose is a constant for the key in a label describing the purpose of the respective object. GardenerPurpose = "gardener.cloud/purpose" + // GardenerDescription is a constant for a key in an annotation describing what the resource is used for. + GardenerDescription = "gardener.cloud/description" // GardenerOperation is a constant for an annotation on a resource that describes a desired operation. GardenerOperation = "gardener.cloud/operation" // GardenerOperationReconcile is a constant for the value of the operation annotation describing a reconcile // operation. GardenerOperationReconcile = "reconcile" + // GardenerTimestamp is a constant for an annotation on a resource that describes the timestamp when a reconciliation has been requested. + // It is only used to guarantee an update event for watching clients in case the operation-annotation is already present. + GardenerTimestamp = "gardener.cloud/timestamp" // GardenerOperationMigrate is a constant for the value of the operation annotation describing a migration // operation. GardenerOperationMigrate = "migrate" // GardenerOperationRestore is a constant for the value of the operation annotation describing a restoration // operation. GardenerOperationRestore = "restore" + // GardenerOperationWaitForState is a constant for the value of the operation annotation describing a wait + // operation. + GardenerOperationWaitForState = "wait-for-state" // DeprecatedGardenRole is the key for an annotation on a Kubernetes object indicating what it is used for. 
// @@ -134,11 +152,21 @@ const ( GardenRoleMonitoring = "monitoring" // GardenRoleOptionalAddon is the value of the GardenRole key indicating type 'optional-addon'. GardenRoleOptionalAddon = "optional-addon" + // GardenRoleCloudConfig is the value of the GardenRole key indicating type 'cloud-config'. + GardenRoleCloudConfig = "cloud-config" + // GardenRoleKubeconfig is the value of the GardenRole key indicating type 'kubeconfig'. + GardenRoleKubeconfig = "kubeconfig" + // GardenRoleSSHKeyPair is the value of the GardenRole key indicating type 'ssh-keypair'. + GardenRoleSSHKeyPair = "ssh-keypair" // DeprecatedShootUID is an annotation key for the shoot namespace in the seed cluster, // which value will be the value of `shoot.status.uid` - // +deprecated: Use `Cluster` resource instead. + // + // Deprecated: Use the `Cluster` resource or the annotation key from the new API group `ShootUID`. DeprecatedShootUID = "shoot.garden.sapcloud.io/uid" + // ShootUID is an annotation key for the shoot namespace in the seed cluster, + // which value will be the value of `shoot.status.uid` + ShootUID = "shoot.gardener.cloud/uid" // SeedResourceManagerClass is the resource-class managed by the Gardener-Resource-Manager // instance in the garden namespace on the seeds. @@ -177,14 +205,21 @@ const ( // LabelNetworkPolicyToShootAPIServer allows Egress from pods labeled with 'networking.gardener.cloud/to-shoot-apiserver=allowed' to talk to Shoot's // Kubernetes API Server. LabelNetworkPolicyToShootAPIServer = "networking.gardener.cloud/to-shoot-apiserver" + // LabelNetworkPolicyFromShootAPIServer allows Egress from Shoot's Kubernetes API Server to talk to pods labeled with + // 'networking.gardener.cloud/from-shoot-apiserver=allowed'. + LabelNetworkPolicyFromShootAPIServer = "networking.gardener.cloud/from-shoot-apiserver" // LabelNetworkPolicyToAll disables all Ingress and Egress traffic into/from this namespace when set to "disallowed". LabelNetworkPolicyToAll = "networking.gardener.cloud/to-all" - // LabelNetworkPolicyToElasticSearch allows Ingress to the ElasticSearch API pods labeled with 'networking.gardener.cloud/to-elasticsearch=allowed', - // and fluentd in 'garden' namespace. - LabelNetworkPolicyToElasticSearch = "networking.gardener.cloud/to-elasticsearch" // LabelNetworkPolicyFromPrometheus allows Ingress from Prometheus to pods labeled with 'networking.gardener.cloud/from-prometheus=allowed' and ports // named 'metrics' in the PodSpecification. LabelNetworkPolicyFromPrometheus = "networking.gardener.cloud/from-prometheus" + // LabelNetworkPolicyShootFromSeed allows Ingress traffic from the seed cluster (where the shoot's kube-apiserver + // runs). + LabelNetworkPolicyShootFromSeed = "networking.gardener.cloud/from-seed" + // LabelNetworkPolicyShootToAPIServer allows Egress traffic to the shoot's API server. + LabelNetworkPolicyShootToAPIServer = "networking.gardener.cloud/to-apiserver" + // LabelNetworkPolicyShootToKubelet allows Egress traffic to the kubelets. + LabelNetworkPolicyShootToKubelet = "networking.gardener.cloud/to-kubelet" // LabelNetworkPolicyAllowed is a constant for allowing a network policy. LabelNetworkPolicyAllowed = "allowed" // LabelNetworkPolicyDisallowed is a constant for disallowing a network policy. @@ -220,22 +255,24 @@ const ( // AnnotationShootUseAsSeed is a constant for an annotation on a Shoot resource indicating that the Shoot shall be registered as Seed in the // Garden cluster once successfully created. 
AnnotationShootUseAsSeed = "shoot.gardener.cloud/use-as-seed" - // AnnotationShootUseAsSeedDeprecated is a constant for an annotation on a Shoot resource indicating that the Shoot shall be registered as Seed in the - // Garden cluster once successfully created. - // - // Deprecated: Use `AnnotationShootUseAsSeed` instead. - AnnotationShootUseAsSeedDeprecated = "shoot.garden.sapcloud.io/use-as-seed" // AnnotationShootIgnoreAlerts is the key for an annotation of a Shoot cluster whose value indicates // if alerts for this cluster should be ignored AnnotationShootIgnoreAlerts = "shoot.gardener.cloud/ignore-alerts" - // AnnotationShootIgnoreAlertsDeprecated is the key for an annotation of a Shoot cluster whose value indicates - // if alerts for this cluster should be ignored - // - // Deprecated: Use `AnnotationShootIgnoreAlerts` instead. - AnnotationShootIgnoreAlertsDeprecated = "shoot.garden.sapcloud.io/ignore-alerts" // AnnotationShootSkipCleanup is a key for an annotation on a Shoot resource that declares that the clean up steps should be skipped when the // cluster is deleted. Concretely, this will skip everything except the deletion of (load balancer) services and persistent volume resources. AnnotationShootSkipCleanup = "shoot.gardener.cloud/skip-cleanup" + // AnnotationShootKonnectivityTunnel is the key for an annotation of a Shoot cluster whose value indicates + // if a konnectivity-tunnel should be deployed into the shoot cluster or not. + AnnotationShootKonnectivityTunnel = "alpha.featuregates.shoot.gardener.cloud/konnectivity-tunnel" + + // AnnotationShootAPIServerSNIPodInjector is the key for an annotation of a Shoot cluster whose value indicates + // if pod injection of the 'KUBERNETES_SERVICE_HOST' environment variable should happen for clusters where APIServerSNI + // featuregate is enabled. + // Any value other than 'disable' enables this feature. + AnnotationShootAPIServerSNIPodInjector = "alpha.featuregates.shoot.gardener.cloud/apiserver-sni-pod-injector" + // AnnotationShootAPIServerSNIPodInjectorDisableValue is the value of the + // `alpha.featuregates.shoot.gardener.cloud/apiserver-sni-pod-injector` annotation that disables the pod injection. + AnnotationShootAPIServerSNIPodInjectorDisableValue = "disable" // OperatingSystemConfigUnitNameKubeletService is a constant for a unit in the operating system config that contains the kubelet service. OperatingSystemConfigUnitNameKubeletService = "kubelet.service" @@ -263,16 +300,33 @@ const ( // LabelControllerRegistrationName is the key of a label on extension namespaces that indicates the controller registration name. LabelControllerRegistrationName = "controllerregistration.core.gardener.cloud/name" + // LabelPodMaintenanceRestart is a constant for a label that describes that a pod should be restarted during maintenance.
+ LabelPodMaintenanceRestart = "maintenance.gardener.cloud/restart" + // LabelWorkerPool is a constant for a label that indicates the worker pool the node belongs to + LabelWorkerPool = "worker.gardener.cloud/pool" + // LabelWorkerPoolDeprecated is a deprecated constant for a label that indicates the worker pool the node belongs to + LabelWorkerPoolDeprecated = "worker.garden.sapcloud.io/group" + // LabelWorkerPoolSystemComponents is a constant that indicates whether the worker pool should host system components + LabelWorkerPoolSystemComponents = "worker.gardener.cloud/system-components" // EventResourceReferenced indicates that the resource deletion is in waiting mode because the resource is still // being referenced by at least one other resource (e.g. a SecretBinding is still referenced by a Shoot) EventResourceReferenced = "ResourceReferenced" - // LabelPodMaintenanceRestart is a constant for a label that describes that a pod should be restarted during maintenance. - LabelPodMaintenanceRestart = "maintenance.gardener.cloud/restart" + // PriorityClassNameShootControlPlane is the name of a priority class for critical pods of a shoot control plane. + PriorityClassNameShootControlPlane = "gardener-shoot-controlplane" - // LabelWorkerPool is a constant for a label that indicates the worker pool the node belongs to - LabelWorkerPool = "worker.gardener.cloud/pool" - // LabelWorkerPool is a deprecated constant for a label that indicates the worker pool the node belongs to - LabelWorkerPoolDeprecated = "worker.garden.sapcloud.io/group" + // ReferencedResourcesPrefix is the prefix used when copying referenced resources to the Shoot namespace in the Seed, + // to avoid naming collisions with resources managed by Gardener. + ReferencedResourcesPrefix = "ref-" + + // ClusterIdentity is a constant equal to the name and data key (that stores the identity) of the cluster-identity ConfigMap + ClusterIdentity = "cluster-identity" + + // SeedNginxIngressClass defines the ingress class for the seed nginx ingress controller + SeedNginxIngressClass = "nginx-gardener" + // IngressKindNginx defines nginx as the kind of managed Seed ingress + IngressKindNginx = "nginx" + // ShootNginxIngressClass defines the ingress class for the shoot nginx ingress controller + ShootNginxIngressClass = "nginx" ) diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/constants/utils.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/constants/utils.go index e6862fec2..dadb1ce2c 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/constants/utils.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/constants/utils.go @@ -14,23 +14,11 @@ package constants -// GetShootUseAsSeedAnnotation fetches the value for AnnotationShootUseAsSeed annotation. -// If not present, it fallbacks to AnnotationShootUseAsSeedDeprecated. -func GetShootUseAsSeedAnnotation(annotations map[string]string) (string, bool) { - return getDeprecatedAnnotation(annotations, AnnotationShootUseAsSeed, AnnotationShootUseAsSeedDeprecated) -} - -// GetShootIgnoreAlertsAnnotation fetches the value for AnnotationShootIgnoreAlerts annotation. -// If not present, it fallbacks to AnnotationShootIgnoreAlertsDeprecated.
-func GetShootIgnoreAlertsAnnotation(annotations map[string]string) (string, bool) { - return getDeprecatedAnnotation(annotations, AnnotationShootIgnoreAlerts, AnnotationShootIgnoreAlertsDeprecated) -} - -func getDeprecatedAnnotation(annotations map[string]string, annotationKey, deprecatedAnnotationKey string) (string, bool) { - val, ok := annotations[annotationKey] - if !ok { - val, ok = annotations[deprecatedAnnotationKey] +// GetShootVPADeploymentNames returns the names of all VPA related deployments related to shoot clusters. +func GetShootVPADeploymentNames() []string { + return []string{ + DeploymentNameVPAAdmissionController, + DeploymentNameVPARecommender, + DeploymentNameVPAUpdater, } - - return val, ok } diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/conversions.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/conversions.go index 77015e00a..749ed9106 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/conversions.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/conversions.go @@ -52,11 +52,25 @@ func addConversionFuncs(scheme *runtime.Scheme) error { return err } + if err := scheme.AddFieldLabelConversionFunc( + SchemeGroupVersion.WithKind("ControllerInstallation"), + func(label, value string) (string, string, error) { + switch label { + case "metadata.name", core.RegistrationRefName, core.SeedRefName: + return label, value, nil + default: + return "", "", fmt.Errorf("field label not supported: %s", label) + } + }, + ); err != nil { + return err + } + if err := scheme.AddFieldLabelConversionFunc( SchemeGroupVersion.WithKind("Shoot"), func(label, value string) (string, string, error) { switch label { - case "metadata.name", "metadata.namespace", core.ShootSeedName, core.ShootCloudProfileName: + case "metadata.name", "metadata.namespace", core.ShootSeedName, core.ShootCloudProfileName, core.ShootStatusSeedName: return label, value, nil default: return "", "", fmt.Errorf("field label not supported: %s", label) @@ -66,8 +80,7 @@ func addConversionFuncs(scheme *runtime.Scheme) error { return err } - // Add non-generated conversion functions - return scheme.AddConversionFuncs() + return nil } func Convert_v1beta1_ProjectSpec_To_core_ProjectSpec(in *ProjectSpec, out *core.ProjectSpec, s conversion.Scope) error { diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/defaults.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/defaults.go index a3625a6ad..fc30a75b9 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/defaults.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/defaults.go @@ -16,13 +16,19 @@ package v1beta1 import ( "math" + "time" + v1beta1constants "github.com/gardener/gardener/pkg/apis/core/v1beta1/constants" "github.com/gardener/gardener/pkg/utils" versionutils "github.com/gardener/gardener/pkg/utils/version" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/utils/pointer" ) func addDefaultingFuncs(scheme *runtime.Scheme) error { @@ -53,6 +59,14 @@ func SetDefaults_Project(obj *Project) { obj.Spec.Members[i].Role = ProjectMemberViewer } } + + if obj.Spec.Namespace != nil && *obj.Spec.Namespace == v1beta1constants.GardenNamespace { + if obj.Spec.Tolerations == nil { + obj.Spec.Tolerations = &ProjectTolerations{} + } + 
addTolerations(&obj.Spec.Tolerations.Whitelist, Toleration{Key: SeedTaintProtected}) + addTolerations(&obj.Spec.Tolerations.Defaults, Toleration{Key: SeedTaintProtected}) + } } func defaultSubject(obj *rbacv1.Subject) { @@ -84,17 +98,37 @@ func SetDefaults_VolumeType(obj *VolumeType) { } } +// SetDefaults_Seed sets default values for Seed objects. +func SetDefaults_Seed(obj *Seed) { + if obj.Spec.Settings == nil { + obj.Spec.Settings = &SeedSettings{} + } + + if obj.Spec.Settings.ExcessCapacityReservation == nil { + obj.Spec.Settings.ExcessCapacityReservation = &SeedSettingExcessCapacityReservation{Enabled: true} + } + + if obj.Spec.Settings.Scheduling == nil { + obj.Spec.Settings.Scheduling = &SeedSettingScheduling{Visible: true} + } + + if obj.Spec.Settings.ShootDNS == nil { + obj.Spec.Settings.ShootDNS = &SeedSettingShootDNS{Enabled: true} + } + + if obj.Spec.Settings.VerticalPodAutoscaler == nil { + obj.Spec.Settings.VerticalPodAutoscaler = &SeedSettingVerticalPodAutoscaler{Enabled: true} + } +} + // SetDefaults_Shoot sets default values for Shoot objects. func SetDefaults_Shoot(obj *Shoot) { k8sVersionLessThan116, _ := versionutils.CompareVersions(obj.Spec.Kubernetes.Version, "<", "1.16") // Error is ignored here because we cannot do anything meaningful with it. // k8sVersionLessThan116 will default to `false`. - trueVar := true - falseVar := false - if obj.Spec.Kubernetes.AllowPrivilegedContainers == nil { - obj.Spec.Kubernetes.AllowPrivilegedContainers = &trueVar + obj.Spec.Kubernetes.AllowPrivilegedContainers = pointer.BoolPtr(true) } if obj.Spec.Kubernetes.KubeAPIServer == nil { @@ -102,11 +136,20 @@ func SetDefaults_Shoot(obj *Shoot) { } if obj.Spec.Kubernetes.KubeAPIServer.EnableBasicAuthentication == nil { if k8sVersionLessThan116 { - obj.Spec.Kubernetes.KubeAPIServer.EnableBasicAuthentication = &trueVar + obj.Spec.Kubernetes.KubeAPIServer.EnableBasicAuthentication = pointer.BoolPtr(true) } else { - obj.Spec.Kubernetes.KubeAPIServer.EnableBasicAuthentication = &falseVar + obj.Spec.Kubernetes.KubeAPIServer.EnableBasicAuthentication = pointer.BoolPtr(false) } } + if obj.Spec.Kubernetes.KubeAPIServer.Requests == nil { + obj.Spec.Kubernetes.KubeAPIServer.Requests = &KubeAPIServerRequests{} + } + if obj.Spec.Kubernetes.KubeAPIServer.Requests.MaxNonMutatingInflight == nil { + obj.Spec.Kubernetes.KubeAPIServer.Requests.MaxNonMutatingInflight = pointer.Int32Ptr(400) + } + if obj.Spec.Kubernetes.KubeAPIServer.Requests.MaxMutatingInflight == nil { + obj.Spec.Kubernetes.KubeAPIServer.Requests.MaxMutatingInflight = pointer.Int32Ptr(200) + } if obj.Spec.Kubernetes.KubeControllerManager == nil { obj.Spec.Kubernetes.KubeControllerManager = &KubeControllerManagerConfig{} @@ -114,6 +157,9 @@ func SetDefaults_Shoot(obj *Shoot) { if obj.Spec.Kubernetes.KubeControllerManager.NodeCIDRMaskSize == nil { obj.Spec.Kubernetes.KubeControllerManager.NodeCIDRMaskSize = calculateDefaultNodeCIDRMaskSize(obj.Spec.Kubernetes.Kubelet, obj.Spec.Provider.Workers) } + if obj.Spec.Kubernetes.KubeControllerManager.PodEvictionTimeout == nil { + obj.Spec.Kubernetes.KubeControllerManager.PodEvictionTimeout = &metav1.Duration{Duration: 2 * time.Minute} + } if obj.Spec.Kubernetes.KubeProxy == nil { obj.Spec.Kubernetes.KubeProxy = &KubeProxyConfig{} @@ -143,14 +189,54 @@ func SetDefaults_Shoot(obj *Shoot) { p := ShootPurposeEvaluation obj.Spec.Purpose = &p } + + // In previous Gardener versions that weren't supporting tolerations, it was hard-coded to (only) allow shoots in the + // `garden` namespace to use seeds that 
had the 'protected' taint. In order to be backwards compatible, now with the + // introduction of tolerations, we add the 'protected' toleration to the garden namespace by default. + if obj.Namespace == v1beta1constants.GardenNamespace { + addTolerations(&obj.Spec.Tolerations, Toleration{Key: SeedTaintProtected}) + } + + if obj.Spec.Kubernetes.Kubelet == nil { + obj.Spec.Kubernetes.Kubelet = &KubeletConfig{} + } + if obj.Spec.Kubernetes.Kubelet.FailSwapOn == nil { + obj.Spec.Kubernetes.Kubelet.FailSwapOn = pointer.BoolPtr(true) + } + + var ( + kubeReservedMemory = resource.MustParse("1Gi") + kubeReservedCPU = resource.MustParse("80m") + kubeReservedPID = resource.MustParse("20k") + + k8sVersionGreaterEqual115, _ = versionutils.CompareVersions(obj.Spec.Kubernetes.Version, ">=", "1.15") + ) + + if obj.Spec.Kubernetes.Kubelet.KubeReserved == nil { + obj.Spec.Kubernetes.Kubelet.KubeReserved = &KubeletConfigReserved{Memory: &kubeReservedMemory, CPU: &kubeReservedCPU} + + if k8sVersionGreaterEqual115 { + obj.Spec.Kubernetes.Kubelet.KubeReserved.PID = &kubeReservedPID + } + } else { + if obj.Spec.Kubernetes.Kubelet.KubeReserved.Memory == nil { + obj.Spec.Kubernetes.Kubelet.KubeReserved.Memory = &kubeReservedMemory + } + if obj.Spec.Kubernetes.Kubelet.KubeReserved.CPU == nil { + obj.Spec.Kubernetes.Kubelet.KubeReserved.CPU = &kubeReservedCPU + } + if obj.Spec.Kubernetes.Kubelet.KubeReserved.PID == nil && k8sVersionGreaterEqual115 { + obj.Spec.Kubernetes.Kubelet.KubeReserved.PID = &kubeReservedPID + } + } + + if obj.Spec.Maintenance == nil { + obj.Spec.Maintenance = &Maintenance{} + } } // SetDefaults_Maintenance sets default values for Maintenance objects. func SetDefaults_Maintenance(obj *Maintenance) { - if obj == nil { - obj = &Maintenance{} - } - if obj.AutoUpdate == nil { obj.AutoUpdate = &MaintenanceAutoUpdate{ KubernetesVersion: true, @@ -167,6 +253,38 @@ func SetDefaults_Maintenance(obj *Maintenance) { } } +// SetDefaults_VerticalPodAutoscaler sets default values for VerticalPodAutoscaler objects. +func SetDefaults_VerticalPodAutoscaler(obj *VerticalPodAutoscaler) { + if obj.EvictAfterOOMThreshold == nil { + v := DefaultEvictAfterOOMThreshold + obj.EvictAfterOOMThreshold = &v + } + if obj.EvictionRateBurst == nil { + v := DefaultEvictionRateBurst + obj.EvictionRateBurst = &v + } + if obj.EvictionRateLimit == nil { + v := DefaultEvictionRateLimit + obj.EvictionRateLimit = &v + } + if obj.EvictionTolerance == nil { + v := DefaultEvictionTolerance + obj.EvictionTolerance = &v + } + if obj.RecommendationMarginFraction == nil { + v := DefaultRecommendationMarginFraction + obj.RecommendationMarginFraction = &v + } + if obj.UpdaterInterval == nil { + v := DefaultUpdaterInterval + obj.UpdaterInterval = &v + } + if obj.RecommenderInterval == nil { + v := DefaultRecommenderInterval + obj.RecommenderInterval = &v + } +} + // SetDefaults_Worker sets default values for Worker objects. func SetDefaults_Worker(obj *Worker) { if obj.MaxSurge == nil { @@ -175,6 +293,11 @@ func SetDefaults_Worker(obj *Worker) { if obj.MaxUnavailable == nil { obj.MaxUnavailable = &DefaultWorkerMaxUnavailable } + if obj.SystemComponents == nil { + obj.SystemComponents = &WorkerSystemComponents{ + Allow: DefaultWorkerSystemComponentsAllow, + } + } } // SetDefaults_NginxIngress sets default values for NginxIngress objects. @@ -185,6 +308,21 @@ func SetDefaults_NginxIngress(obj *NginxIngress) { } } +// SetDefaults_ControllerResource sets default values for ControllerResource objects. 
+func SetDefaults_ControllerResource(obj *ControllerResource) { + if obj.Primary == nil { + obj.Primary = pointer.BoolPtr(true) + } +} + +// SetDefaults_ControllerDeployment sets default values for ControllerDeployment objects. +func SetDefaults_ControllerDeployment(obj *ControllerDeployment) { + p := ControllerDeploymentPolicyOnDemand + if obj.Policy == nil { + obj.Policy = &p + } +} + // Helper functions func calculateDefaultNodeCIDRMaskSize(kubelet *KubeletConfig, workers []Worker) *int32 { @@ -204,3 +342,20 @@ func calculateDefaultNodeCIDRMaskSize(kubelet *KubeletConfig, workers []Worker) nodeCidrRange := int32(32 - int(math.Ceil(math.Log2(float64(maxPods*2))))) return &nodeCidrRange } + +func addTolerations(tolerations *[]Toleration, additionalTolerations ...Toleration) { + existingTolerations := sets.NewString() + for _, toleration := range *tolerations { + existingTolerations.Insert(utils.IDForKeyWithOptionalValue(toleration.Key, toleration.Value)) + } + + for _, toleration := range additionalTolerations { + if existingTolerations.Has(toleration.Key) { + continue + } + if existingTolerations.Has(utils.IDForKeyWithOptionalValue(toleration.Key, toleration.Value)) { + continue + } + *tolerations = append(*tolerations, toleration) + } +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/generated.pb.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/generated.pb.go index 5a47363c5..e36c20abe 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/generated.pb.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -28,9 +28,10 @@ import ( github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" k8s_io_api_core_v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1" - v12 "k8s.io/api/rbac/v1" + v13 "k8s.io/api/rbac/v1" resource "k8s.io/apimachinery/pkg/api/resource" v11 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" math "math" math_bits "math/bits" @@ -1088,10 +1089,38 @@ func (m *DNSProvider) XXX_DiscardUnknown() { var xxx_messageInfo_DNSProvider proto.InternalMessageInfo +func (m *DataVolume) Reset() { *m = DataVolume{} } +func (*DataVolume) ProtoMessage() {} +func (*DataVolume) Descriptor() ([]byte, []int) { + return fileDescriptor_ca37af0df9a5bbd2, []int{37} +} +func (m *DataVolume) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DataVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DataVolume) XXX_Merge(src proto.Message) { + xxx_messageInfo_DataVolume.Merge(m, src) +} +func (m *DataVolume) XXX_Size() int { + return m.Size() +} +func (m *DataVolume) XXX_DiscardUnknown() { + xxx_messageInfo_DataVolume.DiscardUnknown(m) +} + +var xxx_messageInfo_DataVolume proto.InternalMessageInfo + func (m *Endpoint) Reset() { *m = Endpoint{} } func (*Endpoint) ProtoMessage() {} func (*Endpoint) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{37} + return fileDescriptor_ca37af0df9a5bbd2, []int{38} } func (m *Endpoint) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1119,7 +1148,7 @@ var xxx_messageInfo_Endpoint proto.InternalMessageInfo func (m *ExpirableVersion) Reset() { *m = ExpirableVersion{} } func (*ExpirableVersion) ProtoMessage() {} func (*ExpirableVersion) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{38} + return fileDescriptor_ca37af0df9a5bbd2, []int{39} } func (m *ExpirableVersion) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1147,7 +1176,7 @@ var xxx_messageInfo_ExpirableVersion proto.InternalMessageInfo func (m *Extension) Reset() { *m = Extension{} } func (*Extension) ProtoMessage() {} func (*Extension) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{39} + return fileDescriptor_ca37af0df9a5bbd2, []int{40} } func (m *Extension) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1175,7 +1204,7 @@ var xxx_messageInfo_Extension proto.InternalMessageInfo func (m *Gardener) Reset() { *m = Gardener{} } func (*Gardener) ProtoMessage() {} func (*Gardener) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{40} + return fileDescriptor_ca37af0df9a5bbd2, []int{41} } func (m *Gardener) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1203,7 +1232,7 @@ var xxx_messageInfo_Gardener proto.InternalMessageInfo func (m *Hibernation) Reset() { *m = Hibernation{} } func (*Hibernation) ProtoMessage() {} func (*Hibernation) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{41} + return fileDescriptor_ca37af0df9a5bbd2, []int{42} } func (m *Hibernation) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1231,7 +1260,7 @@ var xxx_messageInfo_Hibernation proto.InternalMessageInfo func (m *HibernationSchedule) Reset() { *m = HibernationSchedule{} } func (*HibernationSchedule) ProtoMessage() {} func (*HibernationSchedule) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{42} + return 
fileDescriptor_ca37af0df9a5bbd2, []int{43} } func (m *HibernationSchedule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1259,7 +1288,7 @@ var xxx_messageInfo_HibernationSchedule proto.InternalMessageInfo func (m *HorizontalPodAutoscalerConfig) Reset() { *m = HorizontalPodAutoscalerConfig{} } func (*HorizontalPodAutoscalerConfig) ProtoMessage() {} func (*HorizontalPodAutoscalerConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{43} + return fileDescriptor_ca37af0df9a5bbd2, []int{44} } func (m *HorizontalPodAutoscalerConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1284,10 +1313,66 @@ func (m *HorizontalPodAutoscalerConfig) XXX_DiscardUnknown() { var xxx_messageInfo_HorizontalPodAutoscalerConfig proto.InternalMessageInfo +func (m *Ingress) Reset() { *m = Ingress{} } +func (*Ingress) ProtoMessage() {} +func (*Ingress) Descriptor() ([]byte, []int) { + return fileDescriptor_ca37af0df9a5bbd2, []int{45} +} +func (m *Ingress) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Ingress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Ingress) XXX_Merge(src proto.Message) { + xxx_messageInfo_Ingress.Merge(m, src) +} +func (m *Ingress) XXX_Size() int { + return m.Size() +} +func (m *Ingress) XXX_DiscardUnknown() { + xxx_messageInfo_Ingress.DiscardUnknown(m) +} + +var xxx_messageInfo_Ingress proto.InternalMessageInfo + +func (m *IngressController) Reset() { *m = IngressController{} } +func (*IngressController) ProtoMessage() {} +func (*IngressController) Descriptor() ([]byte, []int) { + return fileDescriptor_ca37af0df9a5bbd2, []int{46} +} +func (m *IngressController) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressController) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressController) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressController.Merge(m, src) +} +func (m *IngressController) XXX_Size() int { + return m.Size() +} +func (m *IngressController) XXX_DiscardUnknown() { + xxx_messageInfo_IngressController.DiscardUnknown(m) +} + +var xxx_messageInfo_IngressController proto.InternalMessageInfo + func (m *KubeAPIServerConfig) Reset() { *m = KubeAPIServerConfig{} } func (*KubeAPIServerConfig) ProtoMessage() {} func (*KubeAPIServerConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{44} + return fileDescriptor_ca37af0df9a5bbd2, []int{47} } func (m *KubeAPIServerConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1312,10 +1397,38 @@ func (m *KubeAPIServerConfig) XXX_DiscardUnknown() { var xxx_messageInfo_KubeAPIServerConfig proto.InternalMessageInfo +func (m *KubeAPIServerRequests) Reset() { *m = KubeAPIServerRequests{} } +func (*KubeAPIServerRequests) ProtoMessage() {} +func (*KubeAPIServerRequests) Descriptor() ([]byte, []int) { + return fileDescriptor_ca37af0df9a5bbd2, []int{48} +} +func (m *KubeAPIServerRequests) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *KubeAPIServerRequests) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *KubeAPIServerRequests) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_KubeAPIServerRequests.Merge(m, src) +} +func (m *KubeAPIServerRequests) XXX_Size() int { + return m.Size() +} +func (m *KubeAPIServerRequests) XXX_DiscardUnknown() { + xxx_messageInfo_KubeAPIServerRequests.DiscardUnknown(m) +} + +var xxx_messageInfo_KubeAPIServerRequests proto.InternalMessageInfo + func (m *KubeControllerManagerConfig) Reset() { *m = KubeControllerManagerConfig{} } func (*KubeControllerManagerConfig) ProtoMessage() {} func (*KubeControllerManagerConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{45} + return fileDescriptor_ca37af0df9a5bbd2, []int{49} } func (m *KubeControllerManagerConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1343,7 +1456,7 @@ var xxx_messageInfo_KubeControllerManagerConfig proto.InternalMessageInfo func (m *KubeProxyConfig) Reset() { *m = KubeProxyConfig{} } func (*KubeProxyConfig) ProtoMessage() {} func (*KubeProxyConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{46} + return fileDescriptor_ca37af0df9a5bbd2, []int{50} } func (m *KubeProxyConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1371,7 +1484,7 @@ var xxx_messageInfo_KubeProxyConfig proto.InternalMessageInfo func (m *KubeSchedulerConfig) Reset() { *m = KubeSchedulerConfig{} } func (*KubeSchedulerConfig) ProtoMessage() {} func (*KubeSchedulerConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{47} + return fileDescriptor_ca37af0df9a5bbd2, []int{51} } func (m *KubeSchedulerConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1399,7 +1512,7 @@ var xxx_messageInfo_KubeSchedulerConfig proto.InternalMessageInfo func (m *KubeletConfig) Reset() { *m = KubeletConfig{} } func (*KubeletConfig) ProtoMessage() {} func (*KubeletConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{48} + return fileDescriptor_ca37af0df9a5bbd2, []int{52} } func (m *KubeletConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1427,7 +1540,7 @@ var xxx_messageInfo_KubeletConfig proto.InternalMessageInfo func (m *KubeletConfigEviction) Reset() { *m = KubeletConfigEviction{} } func (*KubeletConfigEviction) ProtoMessage() {} func (*KubeletConfigEviction) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{49} + return fileDescriptor_ca37af0df9a5bbd2, []int{53} } func (m *KubeletConfigEviction) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1455,7 +1568,7 @@ var xxx_messageInfo_KubeletConfigEviction proto.InternalMessageInfo func (m *KubeletConfigEvictionMinimumReclaim) Reset() { *m = KubeletConfigEvictionMinimumReclaim{} } func (*KubeletConfigEvictionMinimumReclaim) ProtoMessage() {} func (*KubeletConfigEvictionMinimumReclaim) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{50} + return fileDescriptor_ca37af0df9a5bbd2, []int{54} } func (m *KubeletConfigEvictionMinimumReclaim) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1483,7 +1596,7 @@ var xxx_messageInfo_KubeletConfigEvictionMinimumReclaim proto.InternalMessageInf func (m *KubeletConfigEvictionSoftGracePeriod) Reset() { *m = KubeletConfigEvictionSoftGracePeriod{} } func (*KubeletConfigEvictionSoftGracePeriod) ProtoMessage() {} func (*KubeletConfigEvictionSoftGracePeriod) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{51} + return fileDescriptor_ca37af0df9a5bbd2, []int{55} } func (m *KubeletConfigEvictionSoftGracePeriod) XXX_Unmarshal(b []byte) error { 
return m.Unmarshal(b) @@ -1508,10 +1621,38 @@ func (m *KubeletConfigEvictionSoftGracePeriod) XXX_DiscardUnknown() { var xxx_messageInfo_KubeletConfigEvictionSoftGracePeriod proto.InternalMessageInfo +func (m *KubeletConfigReserved) Reset() { *m = KubeletConfigReserved{} } +func (*KubeletConfigReserved) ProtoMessage() {} +func (*KubeletConfigReserved) Descriptor() ([]byte, []int) { + return fileDescriptor_ca37af0df9a5bbd2, []int{56} +} +func (m *KubeletConfigReserved) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *KubeletConfigReserved) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *KubeletConfigReserved) XXX_Merge(src proto.Message) { + xxx_messageInfo_KubeletConfigReserved.Merge(m, src) +} +func (m *KubeletConfigReserved) XXX_Size() int { + return m.Size() +} +func (m *KubeletConfigReserved) XXX_DiscardUnknown() { + xxx_messageInfo_KubeletConfigReserved.DiscardUnknown(m) +} + +var xxx_messageInfo_KubeletConfigReserved proto.InternalMessageInfo + func (m *Kubernetes) Reset() { *m = Kubernetes{} } func (*Kubernetes) ProtoMessage() {} func (*Kubernetes) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{52} + return fileDescriptor_ca37af0df9a5bbd2, []int{57} } func (m *Kubernetes) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1539,7 +1680,7 @@ var xxx_messageInfo_Kubernetes proto.InternalMessageInfo func (m *KubernetesConfig) Reset() { *m = KubernetesConfig{} } func (*KubernetesConfig) ProtoMessage() {} func (*KubernetesConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{53} + return fileDescriptor_ca37af0df9a5bbd2, []int{58} } func (m *KubernetesConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1567,7 +1708,7 @@ var xxx_messageInfo_KubernetesConfig proto.InternalMessageInfo func (m *KubernetesDashboard) Reset() { *m = KubernetesDashboard{} } func (*KubernetesDashboard) ProtoMessage() {} func (*KubernetesDashboard) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{54} + return fileDescriptor_ca37af0df9a5bbd2, []int{59} } func (m *KubernetesDashboard) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1595,7 +1736,7 @@ var xxx_messageInfo_KubernetesDashboard proto.InternalMessageInfo func (m *KubernetesInfo) Reset() { *m = KubernetesInfo{} } func (*KubernetesInfo) ProtoMessage() {} func (*KubernetesInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{55} + return fileDescriptor_ca37af0df9a5bbd2, []int{60} } func (m *KubernetesInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1623,7 +1764,7 @@ var xxx_messageInfo_KubernetesInfo proto.InternalMessageInfo func (m *KubernetesSettings) Reset() { *m = KubernetesSettings{} } func (*KubernetesSettings) ProtoMessage() {} func (*KubernetesSettings) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{56} + return fileDescriptor_ca37af0df9a5bbd2, []int{61} } func (m *KubernetesSettings) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1651,7 +1792,7 @@ var xxx_messageInfo_KubernetesSettings proto.InternalMessageInfo func (m *LastError) Reset() { *m = LastError{} } func (*LastError) ProtoMessage() {} func (*LastError) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{57} + return fileDescriptor_ca37af0df9a5bbd2, []int{62} } func (m *LastError) XXX_Unmarshal(b 
[]byte) error { return m.Unmarshal(b) @@ -1679,7 +1820,7 @@ var xxx_messageInfo_LastError proto.InternalMessageInfo func (m *LastOperation) Reset() { *m = LastOperation{} } func (*LastOperation) ProtoMessage() {} func (*LastOperation) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{58} + return fileDescriptor_ca37af0df9a5bbd2, []int{63} } func (m *LastOperation) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1707,7 +1848,7 @@ var xxx_messageInfo_LastOperation proto.InternalMessageInfo func (m *Machine) Reset() { *m = Machine{} } func (*Machine) ProtoMessage() {} func (*Machine) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{59} + return fileDescriptor_ca37af0df9a5bbd2, []int{64} } func (m *Machine) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1732,10 +1873,38 @@ func (m *Machine) XXX_DiscardUnknown() { var xxx_messageInfo_Machine proto.InternalMessageInfo +func (m *MachineControllerManagerSettings) Reset() { *m = MachineControllerManagerSettings{} } +func (*MachineControllerManagerSettings) ProtoMessage() {} +func (*MachineControllerManagerSettings) Descriptor() ([]byte, []int) { + return fileDescriptor_ca37af0df9a5bbd2, []int{65} +} +func (m *MachineControllerManagerSettings) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MachineControllerManagerSettings) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MachineControllerManagerSettings) XXX_Merge(src proto.Message) { + xxx_messageInfo_MachineControllerManagerSettings.Merge(m, src) +} +func (m *MachineControllerManagerSettings) XXX_Size() int { + return m.Size() +} +func (m *MachineControllerManagerSettings) XXX_DiscardUnknown() { + xxx_messageInfo_MachineControllerManagerSettings.DiscardUnknown(m) +} + +var xxx_messageInfo_MachineControllerManagerSettings proto.InternalMessageInfo + func (m *MachineImage) Reset() { *m = MachineImage{} } func (*MachineImage) ProtoMessage() {} func (*MachineImage) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{60} + return fileDescriptor_ca37af0df9a5bbd2, []int{66} } func (m *MachineImage) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1760,10 +1929,38 @@ func (m *MachineImage) XXX_DiscardUnknown() { var xxx_messageInfo_MachineImage proto.InternalMessageInfo +func (m *MachineImageVersion) Reset() { *m = MachineImageVersion{} } +func (*MachineImageVersion) ProtoMessage() {} +func (*MachineImageVersion) Descriptor() ([]byte, []int) { + return fileDescriptor_ca37af0df9a5bbd2, []int{67} +} +func (m *MachineImageVersion) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MachineImageVersion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MachineImageVersion) XXX_Merge(src proto.Message) { + xxx_messageInfo_MachineImageVersion.Merge(m, src) +} +func (m *MachineImageVersion) XXX_Size() int { + return m.Size() +} +func (m *MachineImageVersion) XXX_DiscardUnknown() { + xxx_messageInfo_MachineImageVersion.DiscardUnknown(m) +} + +var xxx_messageInfo_MachineImageVersion proto.InternalMessageInfo + func (m *MachineType) Reset() { *m = MachineType{} } func (*MachineType) ProtoMessage() {} func (*MachineType) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, 
[]int{61} + return fileDescriptor_ca37af0df9a5bbd2, []int{68} } func (m *MachineType) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1791,7 +1988,7 @@ var xxx_messageInfo_MachineType proto.InternalMessageInfo func (m *MachineTypeStorage) Reset() { *m = MachineTypeStorage{} } func (*MachineTypeStorage) ProtoMessage() {} func (*MachineTypeStorage) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{62} + return fileDescriptor_ca37af0df9a5bbd2, []int{69} } func (m *MachineTypeStorage) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1819,7 +2016,7 @@ var xxx_messageInfo_MachineTypeStorage proto.InternalMessageInfo func (m *Maintenance) Reset() { *m = Maintenance{} } func (*Maintenance) ProtoMessage() {} func (*Maintenance) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{63} + return fileDescriptor_ca37af0df9a5bbd2, []int{70} } func (m *Maintenance) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1847,7 +2044,7 @@ var xxx_messageInfo_Maintenance proto.InternalMessageInfo func (m *MaintenanceAutoUpdate) Reset() { *m = MaintenanceAutoUpdate{} } func (*MaintenanceAutoUpdate) ProtoMessage() {} func (*MaintenanceAutoUpdate) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{64} + return fileDescriptor_ca37af0df9a5bbd2, []int{71} } func (m *MaintenanceAutoUpdate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1875,7 +2072,7 @@ var xxx_messageInfo_MaintenanceAutoUpdate proto.InternalMessageInfo func (m *MaintenanceTimeWindow) Reset() { *m = MaintenanceTimeWindow{} } func (*MaintenanceTimeWindow) ProtoMessage() {} func (*MaintenanceTimeWindow) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{65} + return fileDescriptor_ca37af0df9a5bbd2, []int{72} } func (m *MaintenanceTimeWindow) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1903,7 +2100,7 @@ var xxx_messageInfo_MaintenanceTimeWindow proto.InternalMessageInfo func (m *Monitoring) Reset() { *m = Monitoring{} } func (*Monitoring) ProtoMessage() {} func (*Monitoring) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{66} + return fileDescriptor_ca37af0df9a5bbd2, []int{73} } func (m *Monitoring) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1928,10 +2125,38 @@ func (m *Monitoring) XXX_DiscardUnknown() { var xxx_messageInfo_Monitoring proto.InternalMessageInfo +func (m *NamedResourceReference) Reset() { *m = NamedResourceReference{} } +func (*NamedResourceReference) ProtoMessage() {} +func (*NamedResourceReference) Descriptor() ([]byte, []int) { + return fileDescriptor_ca37af0df9a5bbd2, []int{74} +} +func (m *NamedResourceReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NamedResourceReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NamedResourceReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_NamedResourceReference.Merge(m, src) +} +func (m *NamedResourceReference) XXX_Size() int { + return m.Size() +} +func (m *NamedResourceReference) XXX_DiscardUnknown() { + xxx_messageInfo_NamedResourceReference.DiscardUnknown(m) +} + +var xxx_messageInfo_NamedResourceReference proto.InternalMessageInfo + func (m *Networking) Reset() { *m = Networking{} } func (*Networking) ProtoMessage() {} func (*Networking) Descriptor() ([]byte, []int) { - return 
fileDescriptor_ca37af0df9a5bbd2, []int{67} + return fileDescriptor_ca37af0df9a5bbd2, []int{75} } func (m *Networking) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1959,7 +2184,7 @@ var xxx_messageInfo_Networking proto.InternalMessageInfo func (m *NginxIngress) Reset() { *m = NginxIngress{} } func (*NginxIngress) ProtoMessage() {} func (*NginxIngress) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{68} + return fileDescriptor_ca37af0df9a5bbd2, []int{76} } func (m *NginxIngress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1987,7 +2212,7 @@ var xxx_messageInfo_NginxIngress proto.InternalMessageInfo func (m *OIDCConfig) Reset() { *m = OIDCConfig{} } func (*OIDCConfig) ProtoMessage() {} func (*OIDCConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{69} + return fileDescriptor_ca37af0df9a5bbd2, []int{77} } func (m *OIDCConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2015,7 +2240,7 @@ var xxx_messageInfo_OIDCConfig proto.InternalMessageInfo func (m *OpenIDConnectClientAuthentication) Reset() { *m = OpenIDConnectClientAuthentication{} } func (*OpenIDConnectClientAuthentication) ProtoMessage() {} func (*OpenIDConnectClientAuthentication) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{70} + return fileDescriptor_ca37af0df9a5bbd2, []int{78} } func (m *OpenIDConnectClientAuthentication) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2043,7 +2268,7 @@ var xxx_messageInfo_OpenIDConnectClientAuthentication proto.InternalMessageInfo func (m *Plant) Reset() { *m = Plant{} } func (*Plant) ProtoMessage() {} func (*Plant) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{71} + return fileDescriptor_ca37af0df9a5bbd2, []int{79} } func (m *Plant) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2071,7 +2296,7 @@ var xxx_messageInfo_Plant proto.InternalMessageInfo func (m *PlantList) Reset() { *m = PlantList{} } func (*PlantList) ProtoMessage() {} func (*PlantList) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{72} + return fileDescriptor_ca37af0df9a5bbd2, []int{80} } func (m *PlantList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2099,7 +2324,7 @@ var xxx_messageInfo_PlantList proto.InternalMessageInfo func (m *PlantSpec) Reset() { *m = PlantSpec{} } func (*PlantSpec) ProtoMessage() {} func (*PlantSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{73} + return fileDescriptor_ca37af0df9a5bbd2, []int{81} } func (m *PlantSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2127,7 +2352,7 @@ var xxx_messageInfo_PlantSpec proto.InternalMessageInfo func (m *PlantStatus) Reset() { *m = PlantStatus{} } func (*PlantStatus) ProtoMessage() {} func (*PlantStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{74} + return fileDescriptor_ca37af0df9a5bbd2, []int{82} } func (m *PlantStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2155,7 +2380,7 @@ var xxx_messageInfo_PlantStatus proto.InternalMessageInfo func (m *Project) Reset() { *m = Project{} } func (*Project) ProtoMessage() {} func (*Project) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{75} + return fileDescriptor_ca37af0df9a5bbd2, []int{83} } func (m *Project) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2183,7 +2408,7 @@ var xxx_messageInfo_Project proto.InternalMessageInfo func (m *ProjectList) 
Reset() { *m = ProjectList{} } func (*ProjectList) ProtoMessage() {} func (*ProjectList) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{76} + return fileDescriptor_ca37af0df9a5bbd2, []int{84} } func (m *ProjectList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2211,7 +2436,7 @@ var xxx_messageInfo_ProjectList proto.InternalMessageInfo func (m *ProjectMember) Reset() { *m = ProjectMember{} } func (*ProjectMember) ProtoMessage() {} func (*ProjectMember) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{77} + return fileDescriptor_ca37af0df9a5bbd2, []int{85} } func (m *ProjectMember) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2239,7 +2464,7 @@ var xxx_messageInfo_ProjectMember proto.InternalMessageInfo func (m *ProjectSpec) Reset() { *m = ProjectSpec{} } func (*ProjectSpec) ProtoMessage() {} func (*ProjectSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{78} + return fileDescriptor_ca37af0df9a5bbd2, []int{86} } func (m *ProjectSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2267,7 +2492,7 @@ var xxx_messageInfo_ProjectSpec proto.InternalMessageInfo func (m *ProjectStatus) Reset() { *m = ProjectStatus{} } func (*ProjectStatus) ProtoMessage() {} func (*ProjectStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{79} + return fileDescriptor_ca37af0df9a5bbd2, []int{87} } func (m *ProjectStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2292,15 +2517,15 @@ func (m *ProjectStatus) XXX_DiscardUnknown() { var xxx_messageInfo_ProjectStatus proto.InternalMessageInfo -func (m *Provider) Reset() { *m = Provider{} } -func (*Provider) ProtoMessage() {} -func (*Provider) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{80} +func (m *ProjectTolerations) Reset() { *m = ProjectTolerations{} } +func (*ProjectTolerations) ProtoMessage() {} +func (*ProjectTolerations) Descriptor() ([]byte, []int) { + return fileDescriptor_ca37af0df9a5bbd2, []int{88} } -func (m *Provider) XXX_Unmarshal(b []byte) error { +func (m *ProjectTolerations) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *Provider) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *ProjectTolerations) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -2308,27 +2533,27 @@ func (m *Provider) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { } return b[:n], nil } -func (m *Provider) XXX_Merge(src proto.Message) { - xxx_messageInfo_Provider.Merge(m, src) +func (m *ProjectTolerations) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProjectTolerations.Merge(m, src) } -func (m *Provider) XXX_Size() int { +func (m *ProjectTolerations) XXX_Size() int { return m.Size() } -func (m *Provider) XXX_DiscardUnknown() { - xxx_messageInfo_Provider.DiscardUnknown(m) +func (m *ProjectTolerations) XXX_DiscardUnknown() { + xxx_messageInfo_ProjectTolerations.DiscardUnknown(m) } -var xxx_messageInfo_Provider proto.InternalMessageInfo +var xxx_messageInfo_ProjectTolerations proto.InternalMessageInfo -func (m *ProviderConfig) Reset() { *m = ProviderConfig{} } -func (*ProviderConfig) ProtoMessage() {} -func (*ProviderConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{81} +func (m *Provider) Reset() { *m = Provider{} } +func (*Provider) ProtoMessage() {} +func (*Provider) Descriptor() ([]byte, []int) { + 
return fileDescriptor_ca37af0df9a5bbd2, []int{89} } -func (m *ProviderConfig) XXX_Unmarshal(b []byte) error { +func (m *Provider) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *ProviderConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *Provider) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -2336,22 +2561,22 @@ func (m *ProviderConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, erro } return b[:n], nil } -func (m *ProviderConfig) XXX_Merge(src proto.Message) { - xxx_messageInfo_ProviderConfig.Merge(m, src) +func (m *Provider) XXX_Merge(src proto.Message) { + xxx_messageInfo_Provider.Merge(m, src) } -func (m *ProviderConfig) XXX_Size() int { +func (m *Provider) XXX_Size() int { return m.Size() } -func (m *ProviderConfig) XXX_DiscardUnknown() { - xxx_messageInfo_ProviderConfig.DiscardUnknown(m) +func (m *Provider) XXX_DiscardUnknown() { + xxx_messageInfo_Provider.DiscardUnknown(m) } -var xxx_messageInfo_ProviderConfig proto.InternalMessageInfo +var xxx_messageInfo_Provider proto.InternalMessageInfo func (m *Quota) Reset() { *m = Quota{} } func (*Quota) ProtoMessage() {} func (*Quota) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{82} + return fileDescriptor_ca37af0df9a5bbd2, []int{90} } func (m *Quota) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2379,7 +2604,7 @@ var xxx_messageInfo_Quota proto.InternalMessageInfo func (m *QuotaList) Reset() { *m = QuotaList{} } func (*QuotaList) ProtoMessage() {} func (*QuotaList) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{83} + return fileDescriptor_ca37af0df9a5bbd2, []int{91} } func (m *QuotaList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2407,7 +2632,7 @@ var xxx_messageInfo_QuotaList proto.InternalMessageInfo func (m *QuotaSpec) Reset() { *m = QuotaSpec{} } func (*QuotaSpec) ProtoMessage() {} func (*QuotaSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{84} + return fileDescriptor_ca37af0df9a5bbd2, []int{92} } func (m *QuotaSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2435,7 +2660,7 @@ var xxx_messageInfo_QuotaSpec proto.InternalMessageInfo func (m *Region) Reset() { *m = Region{} } func (*Region) ProtoMessage() {} func (*Region) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{85} + return fileDescriptor_ca37af0df9a5bbd2, []int{93} } func (m *Region) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2460,10 +2685,38 @@ func (m *Region) XXX_DiscardUnknown() { var xxx_messageInfo_Region proto.InternalMessageInfo +func (m *ResourceWatchCacheSize) Reset() { *m = ResourceWatchCacheSize{} } +func (*ResourceWatchCacheSize) ProtoMessage() {} +func (*ResourceWatchCacheSize) Descriptor() ([]byte, []int) { + return fileDescriptor_ca37af0df9a5bbd2, []int{94} +} +func (m *ResourceWatchCacheSize) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceWatchCacheSize) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceWatchCacheSize) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceWatchCacheSize.Merge(m, src) +} +func (m *ResourceWatchCacheSize) XXX_Size() int { + return m.Size() +} +func (m *ResourceWatchCacheSize) XXX_DiscardUnknown() { + 
xxx_messageInfo_ResourceWatchCacheSize.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceWatchCacheSize proto.InternalMessageInfo + func (m *SecretBinding) Reset() { *m = SecretBinding{} } func (*SecretBinding) ProtoMessage() {} func (*SecretBinding) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{86} + return fileDescriptor_ca37af0df9a5bbd2, []int{95} } func (m *SecretBinding) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2491,7 +2744,7 @@ var xxx_messageInfo_SecretBinding proto.InternalMessageInfo func (m *SecretBindingList) Reset() { *m = SecretBindingList{} } func (*SecretBindingList) ProtoMessage() {} func (*SecretBindingList) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{87} + return fileDescriptor_ca37af0df9a5bbd2, []int{96} } func (m *SecretBindingList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2519,7 +2772,7 @@ var xxx_messageInfo_SecretBindingList proto.InternalMessageInfo func (m *Seed) Reset() { *m = Seed{} } func (*Seed) ProtoMessage() {} func (*Seed) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{88} + return fileDescriptor_ca37af0df9a5bbd2, []int{97} } func (m *Seed) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2547,7 +2800,7 @@ var xxx_messageInfo_Seed proto.InternalMessageInfo func (m *SeedBackup) Reset() { *m = SeedBackup{} } func (*SeedBackup) ProtoMessage() {} func (*SeedBackup) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{89} + return fileDescriptor_ca37af0df9a5bbd2, []int{98} } func (m *SeedBackup) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2575,7 +2828,7 @@ var xxx_messageInfo_SeedBackup proto.InternalMessageInfo func (m *SeedDNS) Reset() { *m = SeedDNS{} } func (*SeedDNS) ProtoMessage() {} func (*SeedDNS) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{90} + return fileDescriptor_ca37af0df9a5bbd2, []int{99} } func (m *SeedDNS) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2600,10 +2853,38 @@ func (m *SeedDNS) XXX_DiscardUnknown() { var xxx_messageInfo_SeedDNS proto.InternalMessageInfo +func (m *SeedDNSProvider) Reset() { *m = SeedDNSProvider{} } +func (*SeedDNSProvider) ProtoMessage() {} +func (*SeedDNSProvider) Descriptor() ([]byte, []int) { + return fileDescriptor_ca37af0df9a5bbd2, []int{100} +} +func (m *SeedDNSProvider) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SeedDNSProvider) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SeedDNSProvider) XXX_Merge(src proto.Message) { + xxx_messageInfo_SeedDNSProvider.Merge(m, src) +} +func (m *SeedDNSProvider) XXX_Size() int { + return m.Size() +} +func (m *SeedDNSProvider) XXX_DiscardUnknown() { + xxx_messageInfo_SeedDNSProvider.DiscardUnknown(m) +} + +var xxx_messageInfo_SeedDNSProvider proto.InternalMessageInfo + func (m *SeedList) Reset() { *m = SeedList{} } func (*SeedList) ProtoMessage() {} func (*SeedList) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{91} + return fileDescriptor_ca37af0df9a5bbd2, []int{101} } func (m *SeedList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2631,7 +2912,7 @@ var xxx_messageInfo_SeedList proto.InternalMessageInfo func (m *SeedNetworks) Reset() { *m = SeedNetworks{} } func (*SeedNetworks) ProtoMessage() {} func (*SeedNetworks) Descriptor() ([]byte, 
[]int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{92} + return fileDescriptor_ca37af0df9a5bbd2, []int{102} } func (m *SeedNetworks) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2659,7 +2940,7 @@ var xxx_messageInfo_SeedNetworks proto.InternalMessageInfo func (m *SeedProvider) Reset() { *m = SeedProvider{} } func (*SeedProvider) ProtoMessage() {} func (*SeedProvider) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{93} + return fileDescriptor_ca37af0df9a5bbd2, []int{103} } func (m *SeedProvider) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2684,10 +2965,206 @@ func (m *SeedProvider) XXX_DiscardUnknown() { var xxx_messageInfo_SeedProvider proto.InternalMessageInfo +func (m *SeedSelector) Reset() { *m = SeedSelector{} } +func (*SeedSelector) ProtoMessage() {} +func (*SeedSelector) Descriptor() ([]byte, []int) { + return fileDescriptor_ca37af0df9a5bbd2, []int{104} +} +func (m *SeedSelector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SeedSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SeedSelector) XXX_Merge(src proto.Message) { + xxx_messageInfo_SeedSelector.Merge(m, src) +} +func (m *SeedSelector) XXX_Size() int { + return m.Size() +} +func (m *SeedSelector) XXX_DiscardUnknown() { + xxx_messageInfo_SeedSelector.DiscardUnknown(m) +} + +var xxx_messageInfo_SeedSelector proto.InternalMessageInfo + +func (m *SeedSettingExcessCapacityReservation) Reset() { *m = SeedSettingExcessCapacityReservation{} } +func (*SeedSettingExcessCapacityReservation) ProtoMessage() {} +func (*SeedSettingExcessCapacityReservation) Descriptor() ([]byte, []int) { + return fileDescriptor_ca37af0df9a5bbd2, []int{105} +} +func (m *SeedSettingExcessCapacityReservation) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SeedSettingExcessCapacityReservation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SeedSettingExcessCapacityReservation) XXX_Merge(src proto.Message) { + xxx_messageInfo_SeedSettingExcessCapacityReservation.Merge(m, src) +} +func (m *SeedSettingExcessCapacityReservation) XXX_Size() int { + return m.Size() +} +func (m *SeedSettingExcessCapacityReservation) XXX_DiscardUnknown() { + xxx_messageInfo_SeedSettingExcessCapacityReservation.DiscardUnknown(m) +} + +var xxx_messageInfo_SeedSettingExcessCapacityReservation proto.InternalMessageInfo + +func (m *SeedSettingLoadBalancerServices) Reset() { *m = SeedSettingLoadBalancerServices{} } +func (*SeedSettingLoadBalancerServices) ProtoMessage() {} +func (*SeedSettingLoadBalancerServices) Descriptor() ([]byte, []int) { + return fileDescriptor_ca37af0df9a5bbd2, []int{106} +} +func (m *SeedSettingLoadBalancerServices) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SeedSettingLoadBalancerServices) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SeedSettingLoadBalancerServices) XXX_Merge(src proto.Message) { + xxx_messageInfo_SeedSettingLoadBalancerServices.Merge(m, src) +} +func (m *SeedSettingLoadBalancerServices) XXX_Size() int { + return m.Size() +} +func (m *SeedSettingLoadBalancerServices) XXX_DiscardUnknown() { + 
xxx_messageInfo_SeedSettingLoadBalancerServices.DiscardUnknown(m) +} + +var xxx_messageInfo_SeedSettingLoadBalancerServices proto.InternalMessageInfo + +func (m *SeedSettingScheduling) Reset() { *m = SeedSettingScheduling{} } +func (*SeedSettingScheduling) ProtoMessage() {} +func (*SeedSettingScheduling) Descriptor() ([]byte, []int) { + return fileDescriptor_ca37af0df9a5bbd2, []int{107} +} +func (m *SeedSettingScheduling) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SeedSettingScheduling) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SeedSettingScheduling) XXX_Merge(src proto.Message) { + xxx_messageInfo_SeedSettingScheduling.Merge(m, src) +} +func (m *SeedSettingScheduling) XXX_Size() int { + return m.Size() +} +func (m *SeedSettingScheduling) XXX_DiscardUnknown() { + xxx_messageInfo_SeedSettingScheduling.DiscardUnknown(m) +} + +var xxx_messageInfo_SeedSettingScheduling proto.InternalMessageInfo + +func (m *SeedSettingShootDNS) Reset() { *m = SeedSettingShootDNS{} } +func (*SeedSettingShootDNS) ProtoMessage() {} +func (*SeedSettingShootDNS) Descriptor() ([]byte, []int) { + return fileDescriptor_ca37af0df9a5bbd2, []int{108} +} +func (m *SeedSettingShootDNS) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SeedSettingShootDNS) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SeedSettingShootDNS) XXX_Merge(src proto.Message) { + xxx_messageInfo_SeedSettingShootDNS.Merge(m, src) +} +func (m *SeedSettingShootDNS) XXX_Size() int { + return m.Size() +} +func (m *SeedSettingShootDNS) XXX_DiscardUnknown() { + xxx_messageInfo_SeedSettingShootDNS.DiscardUnknown(m) +} + +var xxx_messageInfo_SeedSettingShootDNS proto.InternalMessageInfo + +func (m *SeedSettingVerticalPodAutoscaler) Reset() { *m = SeedSettingVerticalPodAutoscaler{} } +func (*SeedSettingVerticalPodAutoscaler) ProtoMessage() {} +func (*SeedSettingVerticalPodAutoscaler) Descriptor() ([]byte, []int) { + return fileDescriptor_ca37af0df9a5bbd2, []int{109} +} +func (m *SeedSettingVerticalPodAutoscaler) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SeedSettingVerticalPodAutoscaler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SeedSettingVerticalPodAutoscaler) XXX_Merge(src proto.Message) { + xxx_messageInfo_SeedSettingVerticalPodAutoscaler.Merge(m, src) +} +func (m *SeedSettingVerticalPodAutoscaler) XXX_Size() int { + return m.Size() +} +func (m *SeedSettingVerticalPodAutoscaler) XXX_DiscardUnknown() { + xxx_messageInfo_SeedSettingVerticalPodAutoscaler.DiscardUnknown(m) +} + +var xxx_messageInfo_SeedSettingVerticalPodAutoscaler proto.InternalMessageInfo + +func (m *SeedSettings) Reset() { *m = SeedSettings{} } +func (*SeedSettings) ProtoMessage() {} +func (*SeedSettings) Descriptor() ([]byte, []int) { + return fileDescriptor_ca37af0df9a5bbd2, []int{110} +} +func (m *SeedSettings) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SeedSettings) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SeedSettings) 
XXX_Merge(src proto.Message) { + xxx_messageInfo_SeedSettings.Merge(m, src) +} +func (m *SeedSettings) XXX_Size() int { + return m.Size() +} +func (m *SeedSettings) XXX_DiscardUnknown() { + xxx_messageInfo_SeedSettings.DiscardUnknown(m) +} + +var xxx_messageInfo_SeedSettings proto.InternalMessageInfo + func (m *SeedSpec) Reset() { *m = SeedSpec{} } func (*SeedSpec) ProtoMessage() {} func (*SeedSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{94} + return fileDescriptor_ca37af0df9a5bbd2, []int{111} } func (m *SeedSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2715,7 +3192,7 @@ var xxx_messageInfo_SeedSpec proto.InternalMessageInfo func (m *SeedStatus) Reset() { *m = SeedStatus{} } func (*SeedStatus) ProtoMessage() {} func (*SeedStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{95} + return fileDescriptor_ca37af0df9a5bbd2, []int{112} } func (m *SeedStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2743,7 +3220,7 @@ var xxx_messageInfo_SeedStatus proto.InternalMessageInfo func (m *SeedTaint) Reset() { *m = SeedTaint{} } func (*SeedTaint) ProtoMessage() {} func (*SeedTaint) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{96} + return fileDescriptor_ca37af0df9a5bbd2, []int{113} } func (m *SeedTaint) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2768,10 +3245,38 @@ func (m *SeedTaint) XXX_DiscardUnknown() { var xxx_messageInfo_SeedTaint proto.InternalMessageInfo +func (m *SeedTemplate) Reset() { *m = SeedTemplate{} } +func (*SeedTemplate) ProtoMessage() {} +func (*SeedTemplate) Descriptor() ([]byte, []int) { + return fileDescriptor_ca37af0df9a5bbd2, []int{114} +} +func (m *SeedTemplate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SeedTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SeedTemplate) XXX_Merge(src proto.Message) { + xxx_messageInfo_SeedTemplate.Merge(m, src) +} +func (m *SeedTemplate) XXX_Size() int { + return m.Size() +} +func (m *SeedTemplate) XXX_DiscardUnknown() { + xxx_messageInfo_SeedTemplate.DiscardUnknown(m) +} + +var xxx_messageInfo_SeedTemplate proto.InternalMessageInfo + func (m *SeedVolume) Reset() { *m = SeedVolume{} } func (*SeedVolume) ProtoMessage() {} func (*SeedVolume) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{97} + return fileDescriptor_ca37af0df9a5bbd2, []int{115} } func (m *SeedVolume) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2799,7 +3304,7 @@ var xxx_messageInfo_SeedVolume proto.InternalMessageInfo func (m *SeedVolumeProvider) Reset() { *m = SeedVolumeProvider{} } func (*SeedVolumeProvider) ProtoMessage() {} func (*SeedVolumeProvider) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{98} + return fileDescriptor_ca37af0df9a5bbd2, []int{116} } func (m *SeedVolumeProvider) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2827,7 +3332,7 @@ var xxx_messageInfo_SeedVolumeProvider proto.InternalMessageInfo func (m *ServiceAccountConfig) Reset() { *m = ServiceAccountConfig{} } func (*ServiceAccountConfig) ProtoMessage() {} func (*ServiceAccountConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{99} + return fileDescriptor_ca37af0df9a5bbd2, []int{117} } func (m *ServiceAccountConfig) XXX_Unmarshal(b []byte) error { return 
m.Unmarshal(b) @@ -2855,7 +3360,7 @@ var xxx_messageInfo_ServiceAccountConfig proto.InternalMessageInfo func (m *Shoot) Reset() { *m = Shoot{} } func (*Shoot) ProtoMessage() {} func (*Shoot) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{100} + return fileDescriptor_ca37af0df9a5bbd2, []int{118} } func (m *Shoot) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2883,7 +3388,7 @@ var xxx_messageInfo_Shoot proto.InternalMessageInfo func (m *ShootList) Reset() { *m = ShootList{} } func (*ShootList) ProtoMessage() {} func (*ShootList) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{101} + return fileDescriptor_ca37af0df9a5bbd2, []int{119} } func (m *ShootList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2911,7 +3416,7 @@ var xxx_messageInfo_ShootList proto.InternalMessageInfo func (m *ShootMachineImage) Reset() { *m = ShootMachineImage{} } func (*ShootMachineImage) ProtoMessage() {} func (*ShootMachineImage) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{102} + return fileDescriptor_ca37af0df9a5bbd2, []int{120} } func (m *ShootMachineImage) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2939,7 +3444,7 @@ var xxx_messageInfo_ShootMachineImage proto.InternalMessageInfo func (m *ShootNetworks) Reset() { *m = ShootNetworks{} } func (*ShootNetworks) ProtoMessage() {} func (*ShootNetworks) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{103} + return fileDescriptor_ca37af0df9a5bbd2, []int{121} } func (m *ShootNetworks) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2967,7 +3472,7 @@ var xxx_messageInfo_ShootNetworks proto.InternalMessageInfo func (m *ShootSpec) Reset() { *m = ShootSpec{} } func (*ShootSpec) ProtoMessage() {} func (*ShootSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{104} + return fileDescriptor_ca37af0df9a5bbd2, []int{122} } func (m *ShootSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2995,7 +3500,7 @@ var xxx_messageInfo_ShootSpec proto.InternalMessageInfo func (m *ShootStatus) Reset() { *m = ShootStatus{} } func (*ShootStatus) ProtoMessage() {} func (*ShootStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{105} + return fileDescriptor_ca37af0df9a5bbd2, []int{123} } func (m *ShootStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3020,10 +3525,66 @@ func (m *ShootStatus) XXX_DiscardUnknown() { var xxx_messageInfo_ShootStatus proto.InternalMessageInfo +func (m *Toleration) Reset() { *m = Toleration{} } +func (*Toleration) ProtoMessage() {} +func (*Toleration) Descriptor() ([]byte, []int) { + return fileDescriptor_ca37af0df9a5bbd2, []int{124} +} +func (m *Toleration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Toleration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Toleration) XXX_Merge(src proto.Message) { + xxx_messageInfo_Toleration.Merge(m, src) +} +func (m *Toleration) XXX_Size() int { + return m.Size() +} +func (m *Toleration) XXX_DiscardUnknown() { + xxx_messageInfo_Toleration.DiscardUnknown(m) +} + +var xxx_messageInfo_Toleration proto.InternalMessageInfo + +func (m *VerticalPodAutoscaler) Reset() { *m = VerticalPodAutoscaler{} } +func (*VerticalPodAutoscaler) ProtoMessage() {} +func (*VerticalPodAutoscaler) Descriptor() ([]byte, []int) { 
+ return fileDescriptor_ca37af0df9a5bbd2, []int{125} +} +func (m *VerticalPodAutoscaler) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VerticalPodAutoscaler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VerticalPodAutoscaler) XXX_Merge(src proto.Message) { + xxx_messageInfo_VerticalPodAutoscaler.Merge(m, src) +} +func (m *VerticalPodAutoscaler) XXX_Size() int { + return m.Size() +} +func (m *VerticalPodAutoscaler) XXX_DiscardUnknown() { + xxx_messageInfo_VerticalPodAutoscaler.DiscardUnknown(m) +} + +var xxx_messageInfo_VerticalPodAutoscaler proto.InternalMessageInfo + func (m *Volume) Reset() { *m = Volume{} } func (*Volume) ProtoMessage() {} func (*Volume) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{106} + return fileDescriptor_ca37af0df9a5bbd2, []int{126} } func (m *Volume) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3051,7 +3612,7 @@ var xxx_messageInfo_Volume proto.InternalMessageInfo func (m *VolumeType) Reset() { *m = VolumeType{} } func (*VolumeType) ProtoMessage() {} func (*VolumeType) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{107} + return fileDescriptor_ca37af0df9a5bbd2, []int{127} } func (m *VolumeType) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3076,10 +3637,38 @@ func (m *VolumeType) XXX_DiscardUnknown() { var xxx_messageInfo_VolumeType proto.InternalMessageInfo +func (m *WatchCacheSizes) Reset() { *m = WatchCacheSizes{} } +func (*WatchCacheSizes) ProtoMessage() {} +func (*WatchCacheSizes) Descriptor() ([]byte, []int) { + return fileDescriptor_ca37af0df9a5bbd2, []int{128} +} +func (m *WatchCacheSizes) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WatchCacheSizes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *WatchCacheSizes) XXX_Merge(src proto.Message) { + xxx_messageInfo_WatchCacheSizes.Merge(m, src) +} +func (m *WatchCacheSizes) XXX_Size() int { + return m.Size() +} +func (m *WatchCacheSizes) XXX_DiscardUnknown() { + xxx_messageInfo_WatchCacheSizes.DiscardUnknown(m) +} + +var xxx_messageInfo_WatchCacheSizes proto.InternalMessageInfo + func (m *Worker) Reset() { *m = Worker{} } func (*Worker) ProtoMessage() {} func (*Worker) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{108} + return fileDescriptor_ca37af0df9a5bbd2, []int{129} } func (m *Worker) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3107,7 +3696,7 @@ var xxx_messageInfo_Worker proto.InternalMessageInfo func (m *WorkerKubernetes) Reset() { *m = WorkerKubernetes{} } func (*WorkerKubernetes) ProtoMessage() {} func (*WorkerKubernetes) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{109} + return fileDescriptor_ca37af0df9a5bbd2, []int{130} } func (m *WorkerKubernetes) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3132,6 +3721,34 @@ func (m *WorkerKubernetes) XXX_DiscardUnknown() { var xxx_messageInfo_WorkerKubernetes proto.InternalMessageInfo +func (m *WorkerSystemComponents) Reset() { *m = WorkerSystemComponents{} } +func (*WorkerSystemComponents) ProtoMessage() {} +func (*WorkerSystemComponents) Descriptor() ([]byte, []int) { + return fileDescriptor_ca37af0df9a5bbd2, []int{131} +} +func (m 
*WorkerSystemComponents) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WorkerSystemComponents) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *WorkerSystemComponents) XXX_Merge(src proto.Message) { + xxx_messageInfo_WorkerSystemComponents.Merge(m, src) +} +func (m *WorkerSystemComponents) XXX_Size() int { + return m.Size() +} +func (m *WorkerSystemComponents) XXX_DiscardUnknown() { + xxx_messageInfo_WorkerSystemComponents.DiscardUnknown(m) +} + +var xxx_messageInfo_WorkerSystemComponents proto.InternalMessageInfo + func init() { proto.RegisterType((*Addon)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.Addon") proto.RegisterType((*Addons)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.Addons") @@ -3170,6 +3787,7 @@ func init() { proto.RegisterType((*DNS)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.DNS") proto.RegisterType((*DNSIncludeExclude)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.DNSIncludeExclude") proto.RegisterType((*DNSProvider)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.DNSProvider") + proto.RegisterType((*DataVolume)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.DataVolume") proto.RegisterType((*Endpoint)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.Endpoint") proto.RegisterType((*ExpirableVersion)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.ExpirableVersion") proto.RegisterType((*Extension)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.Extension") @@ -3177,8 +3795,11 @@ func init() { proto.RegisterType((*Hibernation)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.Hibernation") proto.RegisterType((*HibernationSchedule)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.HibernationSchedule") proto.RegisterType((*HorizontalPodAutoscalerConfig)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.HorizontalPodAutoscalerConfig") + proto.RegisterType((*Ingress)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.Ingress") + proto.RegisterType((*IngressController)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.IngressController") proto.RegisterType((*KubeAPIServerConfig)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.KubeAPIServerConfig") proto.RegisterMapType((map[string]bool)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.KubeAPIServerConfig.RuntimeConfigEntry") + proto.RegisterType((*KubeAPIServerRequests)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.KubeAPIServerRequests") proto.RegisterType((*KubeControllerManagerConfig)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.KubeControllerManagerConfig") proto.RegisterType((*KubeProxyConfig)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.KubeProxyConfig") proto.RegisterType((*KubeSchedulerConfig)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.KubeSchedulerConfig") @@ -3186,6 +3807,7 @@ func init() { proto.RegisterType((*KubeletConfigEviction)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.KubeletConfigEviction") proto.RegisterType((*KubeletConfigEvictionMinimumReclaim)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.KubeletConfigEvictionMinimumReclaim") proto.RegisterType((*KubeletConfigEvictionSoftGracePeriod)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.KubeletConfigEvictionSoftGracePeriod") + 
proto.RegisterType((*KubeletConfigReserved)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.KubeletConfigReserved") proto.RegisterType((*Kubernetes)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.Kubernetes") proto.RegisterType((*KubernetesConfig)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.KubernetesConfig") proto.RegisterMapType((map[string]bool)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.KubernetesConfig.FeatureGatesEntry") @@ -3195,13 +3817,16 @@ func init() { proto.RegisterType((*LastError)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.LastError") proto.RegisterType((*LastOperation)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.LastOperation") proto.RegisterType((*Machine)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.Machine") + proto.RegisterType((*MachineControllerManagerSettings)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.MachineControllerManagerSettings") proto.RegisterType((*MachineImage)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.MachineImage") + proto.RegisterType((*MachineImageVersion)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.MachineImageVersion") proto.RegisterType((*MachineType)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.MachineType") proto.RegisterType((*MachineTypeStorage)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.MachineTypeStorage") proto.RegisterType((*Maintenance)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.Maintenance") proto.RegisterType((*MaintenanceAutoUpdate)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.MaintenanceAutoUpdate") proto.RegisterType((*MaintenanceTimeWindow)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.MaintenanceTimeWindow") proto.RegisterType((*Monitoring)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.Monitoring") + proto.RegisterType((*NamedResourceReference)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.NamedResourceReference") proto.RegisterType((*Networking)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.Networking") proto.RegisterType((*NginxIngress)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.NginxIngress") proto.RegisterMapType((map[string]string)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.NginxIngress.ConfigEntry") @@ -3218,24 +3843,38 @@ func init() { proto.RegisterType((*ProjectMember)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.ProjectMember") proto.RegisterType((*ProjectSpec)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.ProjectSpec") proto.RegisterType((*ProjectStatus)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.ProjectStatus") + proto.RegisterType((*ProjectTolerations)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.ProjectTolerations") proto.RegisterType((*Provider)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.Provider") - proto.RegisterType((*ProviderConfig)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.ProviderConfig") proto.RegisterType((*Quota)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.Quota") proto.RegisterType((*QuotaList)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.QuotaList") proto.RegisterType((*QuotaSpec)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.QuotaSpec") proto.RegisterMapType((k8s_io_api_core_v1.ResourceList)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.QuotaSpec.MetricsEntry") proto.RegisterType((*Region)(nil), 
"github.com.gardener.gardener.pkg.apis.core.v1beta1.Region") + proto.RegisterMapType((map[string]string)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.Region.LabelsEntry") + proto.RegisterType((*ResourceWatchCacheSize)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.ResourceWatchCacheSize") proto.RegisterType((*SecretBinding)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.SecretBinding") proto.RegisterType((*SecretBindingList)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.SecretBindingList") proto.RegisterType((*Seed)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.Seed") proto.RegisterType((*SeedBackup)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.SeedBackup") proto.RegisterType((*SeedDNS)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.SeedDNS") + proto.RegisterType((*SeedDNSProvider)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.SeedDNSProvider") proto.RegisterType((*SeedList)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.SeedList") proto.RegisterType((*SeedNetworks)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.SeedNetworks") proto.RegisterType((*SeedProvider)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.SeedProvider") + proto.RegisterType((*SeedSelector)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.SeedSelector") + proto.RegisterType((*SeedSettingExcessCapacityReservation)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.SeedSettingExcessCapacityReservation") + proto.RegisterType((*SeedSettingLoadBalancerServices)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.SeedSettingLoadBalancerServices") + proto.RegisterMapType((map[string]string)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.SeedSettingLoadBalancerServices.AnnotationsEntry") + proto.RegisterType((*SeedSettingScheduling)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.SeedSettingScheduling") + proto.RegisterType((*SeedSettingShootDNS)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.SeedSettingShootDNS") + proto.RegisterType((*SeedSettingVerticalPodAutoscaler)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.SeedSettingVerticalPodAutoscaler") + proto.RegisterType((*SeedSettings)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.SeedSettings") proto.RegisterType((*SeedSpec)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.SeedSpec") proto.RegisterType((*SeedStatus)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.SeedStatus") + proto.RegisterMapType((k8s_io_api_core_v1.ResourceList)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.SeedStatus.AllocatableEntry") + proto.RegisterMapType((k8s_io_api_core_v1.ResourceList)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.SeedStatus.CapacityEntry") proto.RegisterType((*SeedTaint)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.SeedTaint") + proto.RegisterType((*SeedTemplate)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.SeedTemplate") proto.RegisterType((*SeedVolume)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.SeedVolume") proto.RegisterType((*SeedVolumeProvider)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.SeedVolumeProvider") proto.RegisterType((*ServiceAccountConfig)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.ServiceAccountConfig") @@ -3245,12 +3884,16 @@ func init() { proto.RegisterType((*ShootNetworks)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.ShootNetworks") 
proto.RegisterType((*ShootSpec)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.ShootSpec") proto.RegisterType((*ShootStatus)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.ShootStatus") + proto.RegisterType((*Toleration)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.Toleration") + proto.RegisterType((*VerticalPodAutoscaler)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.VerticalPodAutoscaler") proto.RegisterType((*Volume)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.Volume") proto.RegisterType((*VolumeType)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.VolumeType") + proto.RegisterType((*WatchCacheSizes)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.WatchCacheSizes") proto.RegisterType((*Worker)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.Worker") proto.RegisterMapType((map[string]string)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.Worker.AnnotationsEntry") proto.RegisterMapType((map[string]string)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.Worker.LabelsEntry") proto.RegisterType((*WorkerKubernetes)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.WorkerKubernetes") + proto.RegisterType((*WorkerSystemComponents)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.WorkerSystemComponents") } func init() { @@ -3258,457 +3901,562 @@ func init() { } var fileDescriptor_ca37af0df9a5bbd2 = []byte{ - // 7186 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x3d, 0x5b, 0x6c, 0x24, 0xc7, - 0x71, 0x9a, 0x5d, 0x2e, 0xc9, 0xad, 0x25, 0x79, 0x64, 0xdf, 0x43, 0x14, 0xa5, 0xbb, 0x95, 0x46, - 0xb6, 0x21, 0x47, 0x36, 0xcf, 0x7a, 0xc5, 0x96, 0x64, 0x3d, 0xb8, 0xbb, 0xbc, 0xbb, 0xf5, 0x1d, - 0x79, 0xeb, 0xde, 0xa3, 0x64, 0xcb, 0x89, 0xe2, 0xe1, 0x4c, 0x73, 0x39, 0xe2, 0xec, 0xcc, 0x6a, - 0x66, 0x96, 0x47, 0x5e, 0x1e, 0x76, 0x14, 0xe4, 0x61, 0xe5, 0xa5, 0x38, 0x71, 0x8c, 0xd8, 0x71, - 0x10, 0x24, 0x41, 0xf2, 0x61, 0xc4, 0x36, 0x60, 0x07, 0x70, 0x90, 0xe4, 0x27, 0x40, 0x60, 0xd9, - 0x40, 0x00, 0x23, 0x88, 0x11, 0x05, 0x70, 0xd6, 0x31, 0x03, 0x18, 0x09, 0x12, 0x20, 0x7f, 0x41, - 0x70, 0xc8, 0x47, 0xd0, 0x8f, 0x99, 0xe9, 0x99, 0x9d, 0xe1, 0x91, 0xb3, 0x3c, 0x4a, 0xf7, 0x45, - 0x6e, 0x57, 0x77, 0x55, 0x3f, 0xaa, 0xab, 0xab, 0xaa, 0xab, 0x6b, 0xa0, 0xd6, 0x31, 0xfd, 0xcd, - 0xfe, 0xfa, 0xa2, 0xee, 0x74, 0xcf, 0x77, 0x34, 0xd7, 0x20, 0x36, 0x71, 0xa3, 0x7f, 0x7a, 0x5b, - 0x9d, 0xf3, 0x5a, 0xcf, 0xf4, 0xce, 0xeb, 0x8e, 0x4b, 0xce, 0x6f, 0x3f, 0xb2, 0x4e, 0x7c, 0xed, - 0x91, 0xf3, 0x1d, 0x0a, 0xd3, 0x7c, 0x62, 0x2c, 0xf6, 0x5c, 0xc7, 0x77, 0xd0, 0xa3, 0x11, 0x8e, - 0xc5, 0xa0, 0x69, 0xf4, 0x4f, 0x6f, 0xab, 0xb3, 0x48, 0x71, 0x2c, 0x52, 0x1c, 0x8b, 0x02, 0xc7, - 0xc2, 0xfb, 0x65, 0xba, 0x4e, 0xc7, 0x39, 0xcf, 0x50, 0xad, 0xf7, 0x37, 0xd8, 0x2f, 0xf6, 0x83, - 0xfd, 0xc7, 0x49, 0x2c, 0xa8, 0x5b, 0x1f, 0xf2, 0x16, 0x4d, 0x87, 0x76, 0x26, 0xe8, 0x4b, 0xb2, - 0x1b, 0xb1, 0x3a, 0xee, 0xba, 0xa6, 0xa7, 0xd5, 0x79, 0x3c, 0xaa, 0xd3, 0xd5, 0xf4, 0x4d, 0xd3, - 0x26, 0xee, 0x6e, 0x30, 0xca, 0xf3, 0x2e, 0xf1, 0x9c, 0xbe, 0xab, 0x93, 0x43, 0xb5, 0xf2, 0xce, - 0x77, 0x89, 0xaf, 0xa5, 0xd1, 0x3a, 0x9f, 0xd5, 0xca, 0xed, 0xdb, 0xbe, 0xd9, 0x1d, 0x26, 0xf3, - 0xe3, 0xb7, 0x6a, 0xe0, 0xe9, 0x9b, 0xa4, 0xab, 0x0d, 0xb5, 0x7b, 0x2c, 0xab, 0x5d, 0xdf, 0x37, - 0xad, 0xf3, 0xa6, 0xed, 0x7b, 0xbe, 0x9b, 0x6c, 0xa4, 0x3e, 0x0a, 0xa5, 0x25, 0xc3, 0x70, 0x6c, - 0xf4, 0x5e, 0x98, 0x20, 0xb6, 0xb6, 0x6e, 0x11, 0x63, 0x5e, 0xb9, 0x5f, 0x79, 0x68, 0xb2, 
0x76, - 0xe2, 0xcd, 0x41, 0xf5, 0xae, 0xbd, 0x41, 0x75, 0x62, 0x99, 0x17, 0xe3, 0x00, 0xae, 0x7e, 0xae, - 0x00, 0xe3, 0xac, 0x91, 0x87, 0x3e, 0xab, 0xc0, 0xc9, 0xad, 0xfe, 0x3a, 0x71, 0x6d, 0xe2, 0x13, - 0xaf, 0xa1, 0x79, 0x9b, 0xeb, 0x8e, 0xe6, 0x72, 0x14, 0x95, 0x47, 0x2f, 0x2e, 0x1e, 0x9e, 0x25, - 0x16, 0x2f, 0x0f, 0xa3, 0xab, 0xdd, 0xbd, 0x37, 0xa8, 0x9e, 0x4c, 0x01, 0xe0, 0x34, 0xe2, 0x68, - 0x1b, 0xa6, 0xec, 0x8e, 0x69, 0xef, 0x34, 0xed, 0x8e, 0x4b, 0x3c, 0x6f, 0xbe, 0xc0, 0x3a, 0xf3, - 0x7c, 0x9e, 0xce, 0xac, 0x4a, 0x78, 0x6a, 0xb3, 0x7b, 0x83, 0xea, 0x94, 0x5c, 0x82, 0x63, 0x74, - 0xd4, 0x2f, 0x2a, 0x70, 0x62, 0xc9, 0xe8, 0x9a, 0x9e, 0x67, 0x3a, 0x76, 0xcb, 0xea, 0x77, 0x4c, - 0x1b, 0xdd, 0x0f, 0x63, 0xb6, 0xd6, 0x25, 0x6c, 0x42, 0xca, 0xb5, 0x29, 0x31, 0xa7, 0x63, 0xab, - 0x5a, 0x97, 0x60, 0x06, 0x41, 0x1b, 0x30, 0xae, 0x3b, 0xf6, 0x86, 0xd9, 0x11, 0xfd, 0xac, 0xe5, - 0xe9, 0x67, 0xcb, 0x75, 0xb6, 0x4d, 0x83, 0xb8, 0x75, 0x86, 0xa9, 0x06, 0x7b, 0x83, 0xea, 0x38, - 0xff, 0x1f, 0x0b, 0xec, 0xea, 0x05, 0x98, 0x5c, 0xb2, 0x88, 0xeb, 0x9b, 0x76, 0x07, 0x3d, 0x05, - 0x33, 0xa4, 0xab, 0x99, 0x16, 0x26, 0x3a, 0x31, 0xb7, 0x89, 0xeb, 0xcd, 0x2b, 0xf7, 0x17, 0x1f, - 0x2a, 0xd7, 0xd0, 0xde, 0xa0, 0x3a, 0xb3, 0x1c, 0x83, 0xe0, 0x44, 0x4d, 0xf5, 0xe7, 0x15, 0xa8, - 0x2c, 0xf5, 0x0d, 0xd3, 0xe7, 0xf8, 0x91, 0x0b, 0x15, 0x8d, 0xfe, 0x6c, 0x39, 0x96, 0xa9, 0xef, - 0x8a, 0x95, 0x7f, 0x2e, 0xcf, 0x20, 0x96, 0x22, 0x34, 0xb5, 0x13, 0x7b, 0x83, 0x6a, 0x45, 0x2a, - 0xc0, 0x32, 0x11, 0x75, 0x13, 0x64, 0x18, 0xfa, 0x38, 0x4c, 0xf1, 0x41, 0xae, 0x68, 0x3d, 0x4c, - 0x36, 0x44, 0x1f, 0x1e, 0x5c, 0xe4, 0x1b, 0x82, 0x52, 0x0a, 0x08, 0x2d, 0x5e, 0x5d, 0x7f, 0x85, - 0xe8, 0x3e, 0x26, 0x1b, 0xc4, 0x25, 0xb6, 0x4e, 0xf8, 0x9a, 0xd6, 0xa5, 0xc6, 0x38, 0x86, 0x4a, - 0xfd, 0x81, 0x02, 0xb3, 0x4b, 0xdb, 0x9a, 0x69, 0x69, 0xeb, 0xa6, 0x65, 0xfa, 0xbb, 0x2f, 0x39, - 0x36, 0x39, 0xc0, 0xa2, 0xae, 0xc1, 0xdd, 0x7d, 0x5b, 0xe3, 0xed, 0x2c, 0xb2, 0xc2, 0xb7, 0xe3, - 0xb5, 0xdd, 0x1e, 0xa1, 0xdc, 0x48, 0x67, 0xfa, 0xde, 0xbd, 0x41, 0xf5, 0xee, 0xb5, 0xf4, 0x2a, - 0x38, 0xab, 0x2d, 0xc2, 0x70, 0x46, 0x02, 0xbd, 0xe0, 0x58, 0xfd, 0xae, 0xc0, 0x5a, 0x64, 0x58, - 0x17, 0xf6, 0x06, 0xd5, 0x33, 0x6b, 0xa9, 0x35, 0x70, 0x46, 0x4b, 0xf5, 0xcd, 0x02, 0x4c, 0xd5, - 0x34, 0x7d, 0xab, 0xdf, 0xab, 0xf5, 0xf5, 0x2d, 0xe2, 0xa3, 0x4f, 0xc2, 0x24, 0x95, 0x65, 0x86, - 0xe6, 0x6b, 0x62, 0x26, 0x3f, 0x20, 0xcd, 0x64, 0x28, 0x5a, 0xa2, 0x45, 0xa4, 0xb5, 0xa3, 0xb9, - 0x5d, 0x21, 0xbe, 0x56, 0x43, 0x62, 0x4e, 0x20, 0x2a, 0xc3, 0x21, 0x56, 0xb4, 0x01, 0x63, 0x5e, - 0x8f, 0xe8, 0x82, 0xe1, 0x1b, 0x79, 0x78, 0x45, 0xee, 0x71, 0xbb, 0x47, 0xf4, 0x68, 0x15, 0xe8, - 0x2f, 0xcc, 0xf0, 0x23, 0x1b, 0xc6, 0x3d, 0x5f, 0xf3, 0xfb, 0x74, 0x7a, 0x28, 0xa5, 0x0b, 0x23, - 0x53, 0x62, 0xd8, 0x6a, 0x33, 0x82, 0xd6, 0x38, 0xff, 0x8d, 0x05, 0x15, 0xf5, 0x9f, 0x14, 0x98, - 0x95, 0xab, 0x5f, 0x31, 0x3d, 0x1f, 0xfd, 0xc4, 0xd0, 0x74, 0x2e, 0x1e, 0x6c, 0x3a, 0x69, 0x6b, - 0x36, 0x99, 0xb3, 0x82, 0xdc, 0x64, 0x50, 0x22, 0x4d, 0x25, 0x81, 0x92, 0xe9, 0x93, 0x2e, 0x67, - 0xab, 0x9c, 0x42, 0x4e, 0xee, 0x72, 0x6d, 0x5a, 0x10, 0x2b, 0x35, 0x29, 0x5a, 0xcc, 0xb1, 0xab, - 0x9f, 0x84, 0x53, 0x72, 0xad, 0x40, 0xdc, 0xd0, 0x9d, 0xe0, 0xef, 0xf6, 0x86, 0x76, 0x02, 0xe5, - 0x2c, 0xcc, 0x20, 0xe8, 0x3d, 0x30, 0xee, 0x92, 0x8e, 0xe9, 0xd8, 0x6c, 0xb5, 0xcb, 0xd1, 0xdc, - 0x61, 0x56, 0x8a, 0x05, 0x54, 0x7d, 0xbd, 0x18, 0x9f, 0x3b, 0xba, 0x8c, 0x68, 0x1b, 0x26, 0x7b, - 0x82, 0x94, 0x98, 0xbb, 0x4b, 0xa3, 0x0e, 0x30, 0xe8, 0x7a, 0x34, 0xab, 0x41, 0x09, 0x0e, 0x69, - 0xa1, 0x9f, 0x83, 
0x99, 0x5e, 0x4c, 0xa2, 0x1e, 0xa1, 0x6c, 0x66, 0x32, 0x36, 0x5e, 0x86, 0x13, - 0xd4, 0xd0, 0x35, 0x28, 0x7b, 0x44, 0x77, 0x09, 0x15, 0x54, 0x82, 0x77, 0x53, 0xa5, 0x59, 0x3b, - 0xa8, 0x24, 0xa4, 0xd9, 0x9c, 0x18, 0x53, 0x39, 0x04, 0xe0, 0x08, 0x11, 0x7a, 0x08, 0x26, 0x3d, - 0x42, 0x0c, 0x2a, 0xa6, 0xe6, 0xc7, 0xf8, 0x82, 0xd1, 0xf1, 0xb7, 0x45, 0x19, 0x0e, 0xa1, 0xea, - 0x5f, 0x8c, 0x01, 0x1a, 0xe6, 0x7b, 0x79, 0x5a, 0x78, 0x89, 0x58, 0x94, 0x23, 0x9f, 0x16, 0xb1, - 0xaf, 0x12, 0xd4, 0xd0, 0x0d, 0x98, 0xb6, 0x34, 0xcf, 0xbf, 0xda, 0xa3, 0x2a, 0x4c, 0xc0, 0x52, - 0x95, 0x47, 0x97, 0xf2, 0x90, 0xbf, 0x22, 0x23, 0xaa, 0xcd, 0xed, 0x0d, 0xaa, 0xd3, 0xb1, 0x22, - 0x1c, 0x27, 0x85, 0x5e, 0x81, 0x32, 0x2d, 0x58, 0x76, 0x5d, 0xc7, 0x15, 0x4b, 0xf2, 0x4c, 0x5e, - 0xba, 0x0c, 0x49, 0x6d, 0x9a, 0x2e, 0x54, 0xf8, 0x13, 0x47, 0xe8, 0xd1, 0x47, 0x00, 0x39, 0xeb, - 0x1e, 0x71, 0xb7, 0x89, 0x71, 0x91, 0xeb, 0x6b, 0x74, 0xb0, 0x74, 0xc9, 0x8a, 0xb5, 0x05, 0xb1, - 0xc4, 0xe8, 0xea, 0x50, 0x0d, 0x9c, 0xd2, 0x0a, 0x6d, 0x01, 0x0a, 0x75, 0xbe, 0x90, 0x2b, 0xe6, - 0x4b, 0x07, 0xe7, 0xa9, 0x33, 0x94, 0xd8, 0xc5, 0x21, 0x14, 0x38, 0x05, 0xad, 0xfa, 0x77, 0x05, - 0xa8, 0x70, 0xbe, 0x59, 0xb6, 0x7d, 0x77, 0xf7, 0x18, 0x8e, 0x12, 0x12, 0x3b, 0x4a, 0xea, 0xf9, - 0xa5, 0x03, 0xeb, 0x70, 0xe6, 0x49, 0xd2, 0x4d, 0x9c, 0x24, 0xcb, 0xa3, 0x12, 0xda, 0xff, 0x20, - 0xf9, 0x9e, 0x02, 0x27, 0xa4, 0xda, 0xc7, 0x70, 0x8e, 0x18, 0xf1, 0x73, 0xe4, 0xb9, 0x11, 0xc7, - 0x97, 0x71, 0x8c, 0x38, 0xb1, 0x61, 0x31, 0x11, 0xff, 0x28, 0xc0, 0x3a, 0x93, 0x31, 0xab, 0x91, - 0x46, 0x15, 0x2e, 0x79, 0x2d, 0x84, 0x60, 0xa9, 0x56, 0x4c, 0x90, 0x15, 0xf6, 0x15, 0x64, 0xdf, - 0x2a, 0xc0, 0xdc, 0xd0, 0xb4, 0x0f, 0xcb, 0x11, 0xe5, 0x6d, 0x92, 0x23, 0x85, 0xb7, 0x43, 0x8e, - 0x14, 0xf3, 0xc8, 0x11, 0xf5, 0x6f, 0x15, 0x28, 0xd6, 0x71, 0x13, 0x3d, 0x1c, 0xd3, 0x7d, 0xef, - 0x96, 0x75, 0xdf, 0x9b, 0x83, 0xea, 0x44, 0x1d, 0x37, 0x25, 0x35, 0xf8, 0xd7, 0x15, 0x98, 0xd3, - 0x1d, 0xdb, 0xd7, 0x28, 0x6b, 0x62, 0x6e, 0xbe, 0x06, 0x2c, 0x96, 0x4b, 0xed, 0xab, 0x27, 0x90, - 0xd5, 0xee, 0x11, 0x1d, 0x98, 0x4b, 0x42, 0x3c, 0x3c, 0x4c, 0x59, 0x5d, 0x83, 0x72, 0xdd, 0x72, - 0xfa, 0x46, 0xd3, 0xde, 0x70, 0x8e, 0x50, 0x77, 0x79, 0x4b, 0x81, 0x29, 0x86, 0xb7, 0xe5, 0x3a, - 0x1b, 0xa6, 0x45, 0xee, 0x0c, 0x15, 0x5a, 0xee, 0x71, 0x96, 0xe0, 0x63, 0x2a, 0xad, 0x5c, 0xf1, - 0x0e, 0x51, 0x69, 0xe5, 0x2e, 0x67, 0xc8, 0xa2, 0x2f, 0x4d, 0xc4, 0x47, 0xc6, 0xa4, 0xd1, 0x43, - 0x30, 0xa9, 0x6b, 0xb5, 0xbe, 0x6d, 0x58, 0x21, 0x5f, 0xd0, 0x5e, 0xd6, 0x97, 0x78, 0x19, 0x0e, - 0xa1, 0xe8, 0x06, 0x40, 0xe4, 0x7b, 0x10, 0xcb, 0x70, 0x61, 0x34, 0x7f, 0x47, 0x9b, 0xf8, 0xd4, - 0x3a, 0xf7, 0xa2, 0xa5, 0x8f, 0x60, 0x58, 0xa2, 0x86, 0x7e, 0x16, 0xa6, 0xc5, 0x24, 0x37, 0xbb, - 0x5a, 0x47, 0x58, 0x7f, 0x39, 0x67, 0x6a, 0x45, 0x42, 0x54, 0x3b, 0x2d, 0x08, 0x4f, 0xcb, 0xa5, - 0x1e, 0x8e, 0x53, 0x43, 0xbb, 0x30, 0xd5, 0x95, 0x2d, 0xda, 0xb1, 0xfc, 0x47, 0x86, 0x64, 0xdd, - 0xd6, 0x4e, 0x09, 0xe2, 0x53, 0x31, 0x5b, 0x38, 0x46, 0x2a, 0x45, 0x31, 0x2f, 0x1d, 0xab, 0x62, - 0x4e, 0x60, 0x82, 0xef, 0x79, 0x6f, 0x7e, 0x9c, 0x8d, 0xfa, 0xa9, 0x3c, 0x84, 0xb9, 0xf8, 0x88, - 0x3c, 0x6c, 0xfc, 0xb7, 0x87, 0x03, 0xdc, 0xc8, 0x84, 0x29, 0x7a, 0x84, 0xb5, 0x89, 0x45, 0x74, - 0xdf, 0x71, 0xe7, 0x27, 0xd8, 0x20, 0x1f, 0x3b, 0xe0, 0x26, 0xd3, 0xd6, 0x89, 0x15, 0x34, 0xe5, - 0x0e, 0x8e, 0xb6, 0x84, 0x0c, 0xc7, 0x50, 0x87, 0x52, 0x70, 0x32, 0x53, 0x0a, 0xf6, 0xa1, 0xb2, - 0x2d, 0x79, 0x1a, 0xca, 0x6c, 0xdc, 0xcf, 0xe6, 0x19, 0x77, 0xe4, 0x76, 0xa8, 0x9d, 0x14, 0x84, - 0x2a, 0xb2, 0x8b, 0x42, 0xa6, 0xa3, 0xfe, 
0xf2, 0x38, 0xcc, 0xd5, 0xad, 0xbe, 0xe7, 0x13, 0x77, - 0xa9, 0xef, 0x3b, 0x9e, 0xae, 0x59, 0xc4, 0x45, 0xaf, 0x29, 0x70, 0x86, 0xfd, 0xdb, 0x70, 0xae, - 0xdb, 0x0d, 0x62, 0x69, 0xbb, 0x4b, 0x1b, 0xb4, 0x86, 0x61, 0x1c, 0x4e, 0x12, 0x35, 0xfa, 0xe2, - 0xc4, 0x66, 0x2e, 0x93, 0x76, 0x2a, 0x46, 0x9c, 0x41, 0x09, 0xfd, 0xaa, 0x02, 0xf7, 0xa4, 0x80, - 0x1a, 0xc4, 0x22, 0x3e, 0x11, 0xb2, 0xe0, 0xb0, 0xfd, 0x38, 0xbb, 0x37, 0xa8, 0xde, 0xd3, 0xce, - 0x42, 0x8a, 0xb3, 0xe9, 0xa1, 0xdf, 0x50, 0x60, 0x21, 0x05, 0x7a, 0x41, 0x33, 0xad, 0xbe, 0x4b, - 0x84, 0xc2, 0x7a, 0xd8, 0xee, 0x9c, 0xdb, 0x1b, 0x54, 0x17, 0xda, 0x99, 0x58, 0xf1, 0x3e, 0x14, - 0xd1, 0xa7, 0xe0, 0x74, 0x08, 0x5d, 0xb3, 0x6d, 0x42, 0x0c, 0x62, 0x5c, 0x33, 0x85, 0xd1, 0x79, - 0xf8, 0xae, 0xdc, 0xb3, 0x37, 0xa8, 0x9e, 0x6e, 0xa7, 0x21, 0xc4, 0xe9, 0x74, 0x50, 0x07, 0xce, - 0x46, 0x00, 0xdf, 0xb4, 0xcc, 0x1b, 0x0c, 0xd3, 0xb5, 0x4d, 0x97, 0x78, 0x9b, 0x8e, 0x65, 0x30, - 0xa1, 0xa1, 0xd4, 0x1e, 0xd8, 0x1b, 0x54, 0xcf, 0xb6, 0xf7, 0xab, 0x88, 0xf7, 0xc7, 0x83, 0x0c, - 0x98, 0xf2, 0x74, 0xcd, 0x6e, 0xda, 0x3e, 0x71, 0xb7, 0x35, 0x6b, 0x7e, 0x3c, 0xd7, 0x00, 0xf9, - 0x16, 0x95, 0xf0, 0xe0, 0x18, 0x56, 0xf5, 0x3f, 0x14, 0xa8, 0x88, 0x9d, 0xc0, 0x14, 0x97, 0x75, - 0x28, 0xe9, 0xf4, 0xe0, 0x12, 0x1c, 0xff, 0x4c, 0xee, 0x03, 0x92, 0x62, 0x8b, 0x4e, 0x47, 0x56, - 0x84, 0x39, 0x6a, 0xb4, 0x9d, 0x72, 0xbc, 0xd5, 0x46, 0x3b, 0xde, 0x18, 0xb5, 0x5b, 0x1c, 0x6d, - 0xea, 0xd7, 0x8a, 0x50, 0xae, 0x3b, 0xb6, 0x61, 0x32, 0x65, 0xf9, 0x91, 0x98, 0x8a, 0x76, 0x56, - 0x16, 0x4e, 0x37, 0x07, 0xd5, 0xe9, 0xb0, 0xa2, 0x24, 0xad, 0x9e, 0x0c, 0x2d, 0x35, 0xae, 0xb3, - 0x3d, 0x10, 0x37, 0xb1, 0x6e, 0x0e, 0xaa, 0x27, 0xc2, 0x66, 0x71, 0xab, 0x0b, 0x6d, 0x03, 0xa2, - 0xba, 0xf3, 0x35, 0x57, 0xb3, 0x3d, 0x8e, 0x96, 0x32, 0x2d, 0xdf, 0x3f, 0x3f, 0x76, 0xb0, 0x35, - 0xa5, 0x2d, 0x22, 0xd5, 0xfa, 0xca, 0x10, 0x36, 0x9c, 0x42, 0x01, 0xbd, 0x02, 0x33, 0xb4, 0x74, - 0xad, 0x67, 0x68, 0x3e, 0x91, 0x36, 0xca, 0x61, 0x68, 0x9e, 0x11, 0x34, 0x67, 0xae, 0xc4, 0x30, - 0xe1, 0x04, 0x66, 0xae, 0xd2, 0x6a, 0x9e, 0x63, 0xb3, 0x3d, 0x10, 0x53, 0x69, 0x69, 0x29, 0x16, - 0x50, 0xf4, 0x5e, 0x98, 0xe8, 0x12, 0xcf, 0xd3, 0x3a, 0x84, 0x31, 0x75, 0x39, 0x3a, 0xac, 0x56, - 0x78, 0x31, 0x0e, 0xe0, 0xea, 0x37, 0xa8, 0x8a, 0x98, 0x50, 0xb5, 0x0f, 0xa0, 0x5c, 0xbf, 0xcd, - 0x3e, 0x36, 0xf5, 0x9b, 0x0a, 0x9c, 0xa2, 0xdd, 0x76, 0x1d, 0xcb, 0xa2, 0xb2, 0xb4, 0x67, 0x39, - 0xbb, 0x5d, 0x62, 0xfb, 0x77, 0x40, 0xd7, 0x7f, 0x54, 0x80, 0x33, 0x51, 0xd7, 0x9b, 0xb6, 0xe7, - 0x6b, 0x96, 0xc5, 0xcd, 0xcb, 0xdb, 0x6f, 0x79, 0xf4, 0x62, 0x96, 0xc7, 0x6a, 0x5e, 0x2b, 0x6e, - 0xb8, 0xef, 0x99, 0xce, 0x97, 0x9d, 0x84, 0xf3, 0xa5, 0x75, 0x84, 0x34, 0xf7, 0xf7, 0xc3, 0xfc, - 0xa7, 0x02, 0x0b, 0xe9, 0x0d, 0x8f, 0xc1, 0x0e, 0x72, 0xe2, 0x76, 0xd0, 0x47, 0x8e, 0x6e, 0xd4, - 0x19, 0x16, 0xd1, 0x3f, 0x66, 0x8e, 0x96, 0xd9, 0x46, 0x1b, 0x70, 0x82, 0xea, 0xa7, 0x9e, 0x2f, - 0xbc, 0x04, 0x87, 0xbb, 0x68, 0x0b, 0x3c, 0x05, 0x27, 0x70, 0x1c, 0x07, 0x4e, 0x22, 0x45, 0xab, - 0x30, 0x41, 0x35, 0x54, 0x8a, 0xbf, 0x70, 0x70, 0xfc, 0xa1, 0x7c, 0x6a, 0xf3, 0xb6, 0x38, 0x40, - 0xa2, 0xfe, 0x56, 0x01, 0xee, 0xdb, 0x6f, 0xf5, 0xd1, 0xab, 0x00, 0x7a, 0x70, 0x24, 0xf0, 0x9b, - 0xd0, 0xbc, 0x87, 0x6a, 0x80, 0x25, 0xda, 0x42, 0x61, 0x91, 0x87, 0x25, 0x22, 0x29, 0x9e, 0xf4, - 0xc2, 0x71, 0x7a, 0xd2, 0xd5, 0xff, 0x52, 0x64, 0x09, 0x22, 0x2f, 0xc9, 0x9d, 0x26, 0x41, 0xe4, - 0xbe, 0x67, 0x7a, 0x31, 0xe2, 0xfb, 0x58, 0x6e, 0x72, 0xc7, 0xed, 0x63, 0xb9, 0xf3, 0x19, 0xfb, - 0xf8, 0x37, 0x0b, 0x59, 0xa3, 0x65, 0xfb, 0xf8, 0x3a, 0x94, 0x83, 
0x10, 0x97, 0x80, 0xdb, 0x2f, - 0x8c, 0xda, 0x27, 0x8e, 0x2e, 0xba, 0x7f, 0x0a, 0x4a, 0x3c, 0x1c, 0xd1, 0x42, 0x3b, 0x00, 0x46, - 0x78, 0xcc, 0x8a, 0xd5, 0xbf, 0x34, 0x1a, 0xe5, 0xe8, 0xd8, 0xae, 0xcd, 0x50, 0x7e, 0x8b, 0x7e, - 0x63, 0x89, 0x96, 0xfa, 0x99, 0x02, 0xa0, 0xe1, 0xee, 0xd2, 0x93, 0x7e, 0xcb, 0xb4, 0x8d, 0xe4, - 0x49, 0x7f, 0xd9, 0xb4, 0x0d, 0xcc, 0x20, 0xa1, 0x2e, 0x50, 0xc8, 0xd4, 0x05, 0x9e, 0x81, 0x13, - 0x1d, 0xcb, 0x59, 0xd7, 0x2c, 0x6b, 0x57, 0x04, 0xca, 0xb0, 0x53, 0x6a, 0xb2, 0x76, 0x92, 0x0a, - 0xbb, 0x8b, 0x71, 0x10, 0x4e, 0xd6, 0x45, 0x3d, 0x98, 0x75, 0x89, 0xee, 0xd8, 0xba, 0x69, 0x31, - 0x05, 0xcd, 0xe9, 0xfb, 0x39, 0xcd, 0xa4, 0x53, 0x7b, 0x83, 0xea, 0x2c, 0x4e, 0xe0, 0xc2, 0x43, - 0xd8, 0xd5, 0x2f, 0x2a, 0x50, 0x6c, 0xac, 0xb6, 0x91, 0x0a, 0xe3, 0x86, 0xd3, 0xd5, 0x4c, 0x5b, - 0x0c, 0x9f, 0xc5, 0x8c, 0x34, 0x58, 0x09, 0x16, 0x10, 0xd4, 0x83, 0x72, 0x20, 0x38, 0x46, 0xba, - 0x19, 0x68, 0xac, 0xb6, 0xc3, 0x7b, 0xd7, 0x90, 0x47, 0x82, 0x12, 0x0f, 0x47, 0x44, 0x54, 0x0d, - 0xe6, 0x1a, 0xab, 0xed, 0xa6, 0xad, 0x5b, 0x7d, 0x83, 0x2c, 0xef, 0xb0, 0x3f, 0xe8, 0xdd, 0x30, - 0x61, 0xf2, 0x12, 0x11, 0xa7, 0x52, 0xa1, 0x82, 0x5e, 0x54, 0xc2, 0x01, 0x8c, 0x56, 0x23, 0xbc, - 0x85, 0x08, 0xb2, 0x60, 0xd5, 0x04, 0x12, 0x1c, 0xc0, 0xd4, 0xb7, 0x0a, 0x50, 0x91, 0x3a, 0x84, - 0x2c, 0x98, 0xe0, 0xc3, 0x0d, 0xae, 0x33, 0x97, 0x73, 0x0e, 0x31, 0xde, 0x6b, 0x4e, 0x9d, 0x4f, - 0xa8, 0x87, 0x03, 0x12, 0xb4, 0x93, 0x3d, 0xd7, 0xec, 0x6a, 0xee, 0x2e, 0x63, 0xaa, 0x49, 0x5e, - 0xad, 0xc5, 0x8b, 0x70, 0x00, 0x43, 0x8b, 0x00, 0xfc, 0xe2, 0x96, 0x5d, 0x72, 0x14, 0xb9, 0xae, - 0x4e, 0x39, 0xbc, 0x1d, 0x96, 0x62, 0xa9, 0x06, 0xba, 0x4f, 0x30, 0x2a, 0xbf, 0xd7, 0x9d, 0x4c, - 0x30, 0xe9, 0x06, 0x94, 0x6e, 0x38, 0x36, 0xf1, 0x84, 0xb7, 0xec, 0x88, 0x06, 0x58, 0xa6, 0x92, - 0xe7, 0x25, 0x8a, 0x17, 0x73, 0xf4, 0xea, 0x36, 0x4c, 0x2e, 0xdb, 0x46, 0xcf, 0x31, 0xb9, 0x1a, - 0x7d, 0x8b, 0x20, 0x99, 0xb3, 0x50, 0xec, 0xbb, 0x96, 0xd8, 0x5b, 0x15, 0x51, 0xa1, 0xb8, 0x86, - 0xaf, 0x60, 0x5a, 0x4e, 0x4d, 0x90, 0x5e, 0xdf, 0xed, 0x39, 0x5e, 0x30, 0xfe, 0xf0, 0x88, 0x6f, - 0xf1, 0x62, 0x1c, 0xc0, 0xd5, 0x9b, 0x0a, 0xcc, 0x2e, 0xef, 0xf4, 0x4c, 0x97, 0x05, 0xb7, 0x10, - 0xd7, 0x33, 0xb9, 0x09, 0xb3, 0xcd, 0xff, 0x15, 0x7d, 0x08, 0xdb, 0x8b, 0x1a, 0x38, 0x80, 0xa3, - 0x0d, 0x98, 0x21, 0xac, 0x39, 0xdd, 0x49, 0x0d, 0x2d, 0x74, 0xe2, 0x1c, 0xc6, 0x02, 0xe3, 0xb1, - 0x53, 0x31, 0x2c, 0x38, 0x81, 0x15, 0xb5, 0x61, 0x46, 0xb7, 0x34, 0xcf, 0x33, 0x37, 0x4c, 0x3d, - 0xba, 0x8c, 0x29, 0xd7, 0x1e, 0xa6, 0x6d, 0xeb, 0x31, 0xc8, 0xcd, 0x41, 0xf5, 0xb4, 0xe8, 0x67, - 0x1c, 0x80, 0x13, 0x28, 0xd4, 0x3f, 0x57, 0xa0, 0xbc, 0xbc, 0xe3, 0x13, 0x9b, 0x0d, 0xe5, 0x9d, - 0x6f, 0xbd, 0x78, 0x30, 0x79, 0x51, 0x20, 0x45, 0x0b, 0x50, 0x30, 0x03, 0xf9, 0x0b, 0xa2, 0xaf, - 0x85, 0x66, 0x03, 0x17, 0x4c, 0x23, 0x64, 0xa0, 0x42, 0x26, 0x03, 0x49, 0x2b, 0x5c, 0xdc, 0x7f, - 0x85, 0xd5, 0xaf, 0x28, 0x50, 0xb9, 0x64, 0xae, 0x13, 0xd7, 0xe6, 0x5a, 0xce, 0xbb, 0x93, 0xe1, - 0x8e, 0x95, 0xb4, 0x50, 0x47, 0xb4, 0x03, 0x65, 0x4f, 0xdf, 0x24, 0x46, 0xdf, 0x0a, 0xef, 0xad, - 0x72, 0x05, 0x35, 0x4a, 0xa4, 0xdb, 0x02, 0x9f, 0x14, 0xac, 0x11, 0x50, 0xc0, 0x11, 0x31, 0xf5, - 0xa7, 0xe1, 0x64, 0x4a, 0x23, 0x54, 0x85, 0x92, 0xe7, 0x6b, 0xae, 0x2f, 0xe6, 0x8c, 0x6d, 0xc1, - 0x36, 0x2d, 0xc0, 0xbc, 0x1c, 0xdd, 0x03, 0x45, 0x62, 0x1b, 0x62, 0xd2, 0x26, 0xe8, 0x86, 0x5a, - 0xb6, 0x0d, 0x4c, 0xcb, 0xd0, 0x43, 0x30, 0x69, 0x39, 0x31, 0xbe, 0x63, 0x97, 0x1b, 0x57, 0x44, - 0x19, 0x0e, 0xa1, 0xea, 0xaf, 0x8d, 0xc3, 0xd9, 0x4b, 0x8e, 0x6b, 0xde, 0xa0, 0x66, 0xbd, 
0xd5, - 0x72, 0x8c, 0xc8, 0x03, 0x2b, 0x1c, 0xe1, 0xbf, 0xa8, 0xc0, 0xdd, 0x7a, 0xaf, 0xdf, 0xb4, 0x4d, - 0xdf, 0xd4, 0x02, 0xc7, 0x58, 0x8b, 0xb8, 0xa6, 0x93, 0xd7, 0x11, 0xcb, 0x22, 0xe2, 0xea, 0xad, - 0xb5, 0x34, 0x94, 0x38, 0x8b, 0x16, 0x7a, 0x05, 0x66, 0x0c, 0xe7, 0xba, 0xcd, 0xdd, 0x74, 0xc4, - 0xd2, 0x76, 0x73, 0xba, 0x5f, 0x19, 0xe3, 0x36, 0x62, 0x98, 0x70, 0x02, 0x33, 0xf3, 0x3d, 0x87, - 0x45, 0x6d, 0x9f, 0x45, 0x04, 0xde, 0x88, 0xa6, 0x33, 0xa7, 0xef, 0xb9, 0x91, 0x8a, 0x11, 0x67, - 0x50, 0x42, 0x9f, 0x82, 0xd3, 0x26, 0x9f, 0x08, 0x4c, 0x34, 0xc3, 0xb4, 0x89, 0xe7, 0xf1, 0x71, - 0x8f, 0xe0, 0x5c, 0x6d, 0xa6, 0x21, 0xc4, 0xe9, 0x74, 0xd0, 0xcb, 0x00, 0xde, 0xae, 0xad, 0x8b, - 0xb5, 0x2e, 0xe5, 0xa2, 0xca, 0x4f, 0xb2, 0x10, 0x0b, 0x96, 0x30, 0xa2, 0x87, 0xa1, 0xec, 0x3b, - 0x16, 0x71, 0x35, 0x5b, 0xe7, 0xbe, 0x27, 0x85, 0xdf, 0x70, 0x5f, 0x0b, 0x0a, 0x71, 0x04, 0x47, - 0x06, 0x4c, 0xf5, 0x7b, 0xd2, 0xe2, 0x4f, 0xe4, 0x77, 0xc0, 0xae, 0x49, 0x78, 0x70, 0x0c, 0xab, - 0xfa, 0x85, 0x49, 0x60, 0xd1, 0xc7, 0x4b, 0xad, 0x66, 0x9b, 0xb8, 0xdb, 0xe1, 0x26, 0x78, 0x5d, - 0x81, 0xd9, 0xc8, 0x77, 0x29, 0x84, 0xa9, 0x92, 0xff, 0x46, 0xf6, 0x72, 0x02, 0x57, 0x6d, 0x5e, - 0x88, 0x88, 0xd9, 0x24, 0x04, 0x0f, 0xd1, 0x45, 0x9f, 0x51, 0x60, 0x56, 0x8b, 0x47, 0x1f, 0x07, - 0x22, 0x2b, 0x57, 0x58, 0x4c, 0x22, 0x92, 0x39, 0xea, 0x4b, 0x02, 0xe0, 0xe1, 0x21, 0xb2, 0xe8, - 0x71, 0x98, 0xd2, 0x7a, 0xe6, 0x52, 0xdf, 0x30, 0xa9, 0x6d, 0x1e, 0x44, 0xa7, 0xb2, 0x69, 0x5e, - 0x6a, 0x35, 0xc3, 0x72, 0x1c, 0xab, 0x15, 0x46, 0x12, 0x8b, 0x89, 0x1c, 0x1b, 0x31, 0x92, 0x58, - 0xcc, 0x61, 0x14, 0x49, 0x2c, 0xa6, 0x4e, 0x26, 0x82, 0x3e, 0x01, 0xf7, 0x70, 0x59, 0x5f, 0xd3, - 0x3c, 0x53, 0x5f, 0xea, 0xfb, 0x9b, 0xc4, 0xf6, 0x83, 0xc3, 0xb9, 0xc4, 0x4e, 0x06, 0x76, 0x33, - 0xb3, 0x9c, 0x55, 0x09, 0x67, 0xb7, 0x47, 0x36, 0x80, 0x63, 0x1a, 0xba, 0x18, 0x0f, 0xbf, 0x1c, - 0xc8, 0x75, 0x71, 0x76, 0xb5, 0xd9, 0xa8, 0x8b, 0xe1, 0xb0, 0xad, 0x13, 0xfd, 0xc6, 0x12, 0x05, - 0xf4, 0x79, 0x05, 0xa6, 0xc5, 0x23, 0x01, 0x41, 0x73, 0x82, 0xad, 0xff, 0x4b, 0x79, 0x99, 0x31, - 0xc1, 0xf0, 0x8b, 0x58, 0x46, 0xce, 0x03, 0x7d, 0xc2, 0x2b, 0xe3, 0x18, 0x0c, 0xc7, 0xfb, 0x81, - 0x7e, 0x57, 0x81, 0x53, 0x1e, 0x71, 0xb7, 0x4d, 0x9d, 0x2c, 0xe9, 0xba, 0xd3, 0xb7, 0x83, 0x45, - 0x9e, 0xcc, 0x6f, 0x05, 0xb6, 0x53, 0xf0, 0xd5, 0xe6, 0xf7, 0x06, 0xd5, 0x53, 0x69, 0x10, 0x9c, - 0x4a, 0x7f, 0xe1, 0x79, 0x40, 0xc3, 0x83, 0x42, 0xb3, 0x50, 0xdc, 0x22, 0x3c, 0x96, 0xbd, 0x8c, - 0xe9, 0xbf, 0xe8, 0x14, 0x94, 0xb6, 0x35, 0xab, 0xcf, 0xb5, 0x91, 0x49, 0xcc, 0x7f, 0x3c, 0x55, - 0xf8, 0x90, 0xa2, 0x7e, 0xa9, 0x08, 0xf7, 0xd2, 0xb9, 0x8a, 0xec, 0xcb, 0x15, 0xcd, 0xd6, 0x3a, - 0xef, 0x4c, 0x21, 0xf1, 0x15, 0x05, 0xee, 0xde, 0x4c, 0x3f, 0xd8, 0xc5, 0xc1, 0xf9, 0xd1, 0x5c, - 0xea, 0xcd, 0x7e, 0xba, 0x02, 0xbf, 0x67, 0xdb, 0xb7, 0x0a, 0xce, 0xea, 0x14, 0x7a, 0x1e, 0x66, - 0x6d, 0xc7, 0x20, 0xf5, 0x66, 0x03, 0xaf, 0x68, 0xde, 0x56, 0xdb, 0xbc, 0xc1, 0xad, 0x81, 0x12, - 0xb7, 0x77, 0x57, 0x13, 0x30, 0x3c, 0x54, 0x5b, 0xfd, 0xb6, 0x02, 0x27, 0xe8, 0xcc, 0xb4, 0x5c, - 0x67, 0x67, 0xf7, 0x9d, 0xb8, 0x26, 0xef, 0x85, 0xb1, 0xae, 0x63, 0x04, 0x7a, 0xee, 0x69, 0xaa, - 0xe3, 0xae, 0x38, 0x06, 0xb9, 0xc9, 0x4d, 0xe4, 0x9d, 0x5d, 0xfa, 0x03, 0xb3, 0x2a, 0xea, 0x1f, - 0x29, 0xfc, 0x20, 0x0a, 0xd4, 0xc1, 0x77, 0x22, 0x8f, 0xa9, 0x03, 0x80, 0x69, 0x5a, 0xcd, 0x22, - 0xfe, 0x3b, 0x71, 0xba, 0x1f, 0x81, 0x8a, 0xde, 0xeb, 0xd7, 0x2f, 0xb4, 0x3f, 0xda, 0x77, 0x7c, - 0x4d, 0x18, 0xe1, 0xec, 0x90, 0xa8, 0xb7, 0xd6, 0x82, 0x62, 0x2c, 0xd7, 0xa1, 0x4c, 0xa8, 0xf7, - 0xfa, 0x62, 0x5b, 
0x8b, 0x77, 0x2e, 0x5c, 0x81, 0x66, 0x4c, 0x58, 0x6f, 0xad, 0xc5, 0x60, 0x78, - 0xa8, 0x36, 0xfa, 0x14, 0x4c, 0x91, 0x6d, 0x53, 0xa7, 0xa7, 0xc2, 0x25, 0xcd, 0x35, 0xc4, 0xd9, - 0xd6, 0xcc, 0x3b, 0xf8, 0x70, 0x6a, 0x97, 0x05, 0x52, 0x7e, 0xb6, 0x2e, 0x4b, 0x24, 0x70, 0x8c, - 0x20, 0x3b, 0xe7, 0xc4, 0xef, 0x15, 0x6d, 0xa7, 0xe5, 0x18, 0x17, 0x5d, 0x4d, 0x27, 0x92, 0x12, - 0x57, 0x12, 0xe7, 0x5c, 0x56, 0x25, 0x9c, 0xdd, 0x1e, 0x7d, 0x59, 0x81, 0x33, 0x21, 0xd4, 0xb4, - 0xcd, 0x6e, 0xbf, 0x8b, 0x89, 0x6e, 0x69, 0x66, 0x57, 0x1c, 0x7a, 0x2f, 0x1e, 0xd9, 0x40, 0xe3, - 0xe8, 0xb9, 0x06, 0x9d, 0x0e, 0xc3, 0x19, 0x5d, 0x42, 0x7f, 0xa8, 0xc0, 0xfd, 0x01, 0xa8, 0xe5, - 0x12, 0xcf, 0xeb, 0xbb, 0x24, 0xba, 0x91, 0x15, 0x53, 0x92, 0x4f, 0x91, 0x7c, 0xd7, 0xde, 0xa0, - 0x7a, 0xff, 0xf2, 0x2d, 0x70, 0xe3, 0x5b, 0x52, 0x97, 0xd9, 0xa5, 0xed, 0x6c, 0xf8, 0xe2, 0x94, - 0xbc, 0x5d, 0xec, 0x42, 0x49, 0xe0, 0x18, 0x41, 0xf4, 0x55, 0x05, 0xee, 0x96, 0x0b, 0x64, 0x6e, - 0x29, 0xb3, 0xce, 0x7c, 0xec, 0xc8, 0x3a, 0x93, 0xc0, 0xcf, 0x0d, 0xc1, 0x0c, 0x20, 0xce, 0xea, - 0x15, 0x35, 0xe8, 0xbb, 0x8c, 0x31, 0xbd, 0x79, 0x60, 0xec, 0xcc, 0x0c, 0x7a, 0xce, 0xab, 0x1e, - 0x0e, 0x60, 0x54, 0x33, 0xed, 0x39, 0x46, 0xcb, 0x34, 0xbc, 0x2b, 0x66, 0xd7, 0xf4, 0xe7, 0x2b, - 0x2c, 0x18, 0x96, 0x4d, 0x47, 0xcb, 0x31, 0x5a, 0xcd, 0x06, 0x2f, 0xc7, 0xb1, 0x5a, 0x2c, 0xe0, - 0xc7, 0xec, 0x6a, 0x1d, 0xd2, 0xea, 0x5b, 0x56, 0xcb, 0x75, 0xd8, 0x7b, 0xbf, 0x06, 0xd1, 0x0c, - 0xcb, 0xb4, 0xc9, 0xfc, 0x54, 0xfe, 0x80, 0x9f, 0x66, 0x16, 0x52, 0x9c, 0x4d, 0x4f, 0x1d, 0x14, - 0xe0, 0x74, 0xea, 0x4c, 0xa2, 0x67, 0xe0, 0x44, 0x97, 0x74, 0x1d, 0x77, 0x77, 0x29, 0x78, 0xe9, - 0x25, 0xfc, 0x04, 0xcc, 0x19, 0xbd, 0x12, 0x07, 0xe1, 0x64, 0x5d, 0x2a, 0xe7, 0x18, 0xd5, 0x0b, - 0xed, 0xa8, 0x7d, 0x21, 0x92, 0x73, 0xcd, 0x04, 0x0c, 0x0f, 0xd5, 0x46, 0x75, 0x98, 0x13, 0x65, - 0x4d, 0x7a, 0x10, 0x7b, 0x17, 0x5c, 0x12, 0x78, 0xef, 0xe8, 0xc1, 0x36, 0xd7, 0x4c, 0x02, 0xf1, - 0x70, 0x7d, 0x3a, 0x0a, 0xfa, 0x43, 0xee, 0xc5, 0x58, 0x34, 0x8a, 0xd5, 0x38, 0x08, 0x27, 0xeb, - 0x06, 0x2a, 0x43, 0xac, 0x0b, 0xa5, 0x68, 0x14, 0xab, 0x09, 0x18, 0x1e, 0xaa, 0xad, 0x7e, 0x7f, - 0x0c, 0x1e, 0x3c, 0x80, 0xf4, 0x41, 0xdd, 0xf4, 0xe9, 0xbe, 0x05, 0x2f, 0x2c, 0x06, 0x37, 0x22, - 0x8b, 0x1f, 0xed, 0x6b, 0xb6, 0x6f, 0xfa, 0xbb, 0x07, 0x5c, 0x9e, 0x5e, 0xc6, 0xf2, 0x1c, 0x9e, - 0xde, 0x41, 0x97, 0xd3, 0xcb, 0x5a, 0xce, 0xc3, 0x93, 0x3c, 0xf8, 0xf2, 0x77, 0xd3, 0x97, 0x3f, - 0xe7, 0xac, 0xde, 0x92, 0x5d, 0x7a, 0x19, 0xec, 0x92, 0x73, 0x56, 0x0f, 0xc0, 0x5e, 0xff, 0x32, - 0x06, 0xef, 0x3a, 0x88, 0x24, 0xcc, 0xc9, 0x5f, 0x29, 0xb2, 0xe6, 0xb6, 0xf2, 0x57, 0xd6, 0x5d, - 0xd4, 0x6d, 0xe4, 0xaf, 0x14, 0x92, 0xb7, 0x9b, 0xbf, 0xb2, 0x66, 0xf5, 0x76, 0xf1, 0x57, 0xd6, - 0xac, 0x1e, 0x80, 0xbf, 0xfe, 0x74, 0x02, 0xa4, 0xf0, 0x3a, 0xaa, 0xfa, 0x69, 0x96, 0xe5, 0x5c, - 0x6f, 0xb9, 0xe6, 0xb6, 0x69, 0x91, 0x0e, 0x31, 0xc2, 0x68, 0x2d, 0x4f, 0x38, 0xbf, 0xd9, 0x59, - 0xb4, 0x94, 0x55, 0x09, 0x67, 0xb7, 0xa7, 0xaa, 0xfd, 0x9c, 0x9e, 0x8c, 0xd2, 0x15, 0x5c, 0xb3, - 0x9c, 0x2f, 0x30, 0x31, 0x81, 0x8c, 0xaf, 0xec, 0x50, 0x31, 0x1e, 0x26, 0x8b, 0x3e, 0xad, 0xc0, - 0xf4, 0x96, 0xec, 0xb6, 0x10, 0xbc, 0x74, 0xf1, 0x88, 0xfc, 0x1f, 0xfc, 0x79, 0x4f, 0x0c, 0x80, - 0xe3, 0x04, 0xa9, 0x72, 0x79, 0x7a, 0x2b, 0xcd, 0x1b, 0x20, 0x78, 0xec, 0x6a, 0xde, 0xae, 0x64, - 0xb8, 0x17, 0xb8, 0x03, 0x37, 0xb5, 0x02, 0x4e, 0xef, 0x48, 0x38, 0x4b, 0xa1, 0x11, 0x29, 0xd8, - 0x31, 0xf7, 0x2c, 0x25, 0xac, 0xd1, 0x68, 0x96, 0x42, 0x00, 0x8e, 0x13, 0x44, 0x3d, 0x28, 0x6f, - 0x05, 0x26, 0xb9, 0x30, 0x11, 0xea, 0x79, 
0xa9, 0x4b, 0x76, 0x3d, 0x77, 0x14, 0x87, 0x85, 0x38, - 0x22, 0x82, 0x36, 0x61, 0x62, 0x8b, 0x8b, 0x5c, 0xa1, 0xda, 0x2f, 0x8d, 0xac, 0xbf, 0x72, 0x0d, - 0x53, 0x14, 0xe1, 0x00, 0xbd, 0x7c, 0x29, 0x35, 0x79, 0x8b, 0x4b, 0xa9, 0x1f, 0x29, 0x30, 0x64, - 0xb1, 0xa2, 0x37, 0x14, 0x98, 0xda, 0x20, 0x9a, 0xdf, 0x77, 0xc9, 0x45, 0xcd, 0x0f, 0x43, 0x34, - 0x5e, 0x38, 0x0a, 0x43, 0x79, 0xf1, 0x82, 0x84, 0x98, 0xfb, 0xef, 0xc2, 0x57, 0x17, 0x32, 0x08, - 0xc7, 0x7a, 0xb0, 0xf0, 0x1c, 0xcc, 0x0d, 0x35, 0x3c, 0x94, 0x8f, 0xec, 0x6f, 0x84, 0xdf, 0x22, - 0x99, 0xa9, 0xe3, 0x65, 0x28, 0x69, 0x86, 0x11, 0x3e, 0xe4, 0x7d, 0x32, 0x9f, 0x9f, 0xda, 0x90, - 0x23, 0x61, 0xd8, 0x4f, 0xcc, 0xd1, 0xa2, 0x0b, 0x80, 0xb4, 0x98, 0x4b, 0x76, 0xc5, 0x31, 0x02, - 0x95, 0x98, 0xbd, 0x6b, 0x5d, 0x1a, 0x82, 0xe2, 0x94, 0x16, 0xea, 0xd3, 0x30, 0x13, 0x8f, 0x63, - 0x3e, 0xc4, 0xe5, 0xb2, 0xfa, 0x2b, 0x0a, 0xa0, 0xe1, 0x47, 0x3e, 0xc8, 0x85, 0x49, 0x51, 0x23, - 0x58, 0xe2, 0x5c, 0xbe, 0x90, 0xe4, 0xb5, 0x77, 0x14, 0x8a, 0x24, 0x0a, 0x3c, 0x1c, 0xd2, 0x51, - 0xff, 0x4f, 0x81, 0xe8, 0xa5, 0x20, 0x7a, 0x02, 0x2a, 0x06, 0xf1, 0x74, 0xd7, 0xec, 0xf9, 0xd1, - 0x38, 0xc2, 0x87, 0x19, 0x8d, 0x08, 0x84, 0xe5, 0x7a, 0x48, 0x85, 0x71, 0x5f, 0xf3, 0xb6, 0x9a, - 0x0d, 0x61, 0x1b, 0xb0, 0xc0, 0x91, 0x6b, 0xac, 0x04, 0x0b, 0x08, 0x7a, 0x1f, 0x94, 0x74, 0x7a, - 0x1e, 0x09, 0xcf, 0xff, 0x19, 0x16, 0x5f, 0x4e, 0x0b, 0x6e, 0x0e, 0xaa, 0x65, 0x46, 0x9d, 0xfe, - 0xc2, 0xbc, 0x12, 0xda, 0x38, 0x82, 0x00, 0x68, 0x74, 0xeb, 0xe0, 0x67, 0xf5, 0x1f, 0x0a, 0x10, - 0x7f, 0x9c, 0x99, 0x77, 0x0a, 0x86, 0x23, 0xb6, 0x0b, 0xb7, 0x2d, 0x62, 0xfb, 0x7d, 0x2c, 0x07, - 0x02, 0xcf, 0x64, 0xc3, 0x3d, 0x9f, 0x72, 0xe6, 0x02, 0x9e, 0x87, 0x26, 0xac, 0x81, 0x9e, 0x64, - 0xf7, 0xc3, 0x7e, 0x60, 0x31, 0x3d, 0x18, 0x6c, 0x8b, 0x36, 0x2d, 0xbc, 0x29, 0x62, 0xd2, 0xc3, - 0xf1, 0xb3, 0x52, 0xcc, 0x5b, 0xa0, 0x27, 0x44, 0xe4, 0x40, 0x29, 0x16, 0x37, 0x1f, 0x04, 0xdb, - 0xcf, 0xc5, 0x1a, 0x46, 0xe1, 0x04, 0xea, 0x6f, 0x2b, 0x30, 0x21, 0x5e, 0x6c, 0x1d, 0x20, 0xf8, - 0x60, 0x03, 0x4a, 0x4c, 0x23, 0x1b, 0x45, 0x45, 0x68, 0x6f, 0x3a, 0x8e, 0x1f, 0x7b, 0xb7, 0xc6, - 0xae, 0xc1, 0xd9, 0xbf, 0x98, 0xa3, 0x57, 0xff, 0x44, 0x81, 0x29, 0xb9, 0xca, 0x01, 0xc2, 0x51, - 0xe4, 0x0d, 0x59, 0x38, 0xa6, 0x0d, 0xf9, 0xcd, 0x22, 0x54, 0xa4, 0xe7, 0x6e, 0x68, 0x05, 0x8a, - 0x7a, 0xaf, 0x9f, 0xd3, 0x8a, 0x0c, 0x43, 0x68, 0xea, 0xad, 0x35, 0x4c, 0xf1, 0x50, 0x74, 0x9d, - 0x5e, 0x3f, 0xa7, 0x91, 0x18, 0xa2, 0xbb, 0x48, 0xd1, 0x75, 0x7a, 0x7d, 0xf4, 0x02, 0x8c, 0x73, - 0x9b, 0x21, 0xa7, 0x0d, 0x18, 0x86, 0x59, 0x73, 0x53, 0x04, 0x0b, 0x6c, 0xe1, 0xda, 0x8c, 0x65, - 0xae, 0x4d, 0x17, 0x26, 0x3c, 0xdf, 0x71, 0x29, 0xe3, 0x94, 0xf2, 0x3f, 0xb5, 0x94, 0x66, 0xba, - 0xcd, 0xb1, 0xf1, 0x33, 0x5c, 0xfc, 0xc0, 0x01, 0x0d, 0x2a, 0xe2, 0xfa, 0x1e, 0xb3, 0x0c, 0xc6, - 0x99, 0x7e, 0xcc, 0x44, 0xdc, 0x1a, 0x2b, 0xc1, 0x02, 0xa2, 0x7e, 0x43, 0x01, 0x34, 0x8c, 0x10, - 0x3d, 0x08, 0x25, 0x16, 0x9f, 0x23, 0x18, 0x4d, 0x7a, 0x5d, 0xa3, 0x79, 0x1e, 0xe6, 0x30, 0xf4, - 0x22, 0x8c, 0x79, 0xe6, 0x8d, 0xbc, 0xd6, 0x7b, 0x28, 0x9f, 0x04, 0x4d, 0x76, 0xf1, 0xc1, 0x10, - 0x86, 0x1b, 0xb0, 0x98, 0xb5, 0x01, 0xd5, 0x5f, 0x28, 0x50, 0x8e, 0x33, 0x6d, 0x9f, 0xd8, 0xec, - 0x06, 0x7d, 0x17, 0x40, 0xeb, 0xfb, 0x0e, 0x17, 0x38, 0x82, 0xf1, 0x9a, 0xf9, 0x26, 0x37, 0x44, - 0xba, 0x14, 0x22, 0xe4, 0xd7, 0x95, 0xd1, 0x6f, 0x2c, 0x11, 0xa3, 0xa4, 0x7d, 0xb3, 0x4b, 0x5e, - 0x34, 0x6d, 0xc3, 0xb9, 0x2e, 0xe6, 0x62, 0x54, 0xd2, 0xd7, 0x42, 0x84, 0x9c, 0x74, 0xf4, 0x1b, - 0x4b, 0xc4, 0xd4, 0xaf, 0x2a, 0x70, 0x3a, 0xb5, 0xc3, 0xe8, 0x22, 
0xcc, 0x45, 0x57, 0x06, 0x2f, - 0x48, 0x47, 0xfc, 0x64, 0xf4, 0xd6, 0xfc, 0x72, 0xb2, 0x02, 0x1e, 0x6e, 0x83, 0x56, 0xe0, 0xa4, - 0xfc, 0x6c, 0x36, 0x40, 0xc5, 0xef, 0x1b, 0xee, 0x15, 0xa8, 0x4e, 0xae, 0x0c, 0x57, 0xc1, 0x69, - 0xed, 0xd4, 0x4f, 0xc4, 0x3a, 0x1c, 0x0d, 0x8b, 0x32, 0xdc, 0x3a, 0xe9, 0x84, 0x61, 0x9c, 0x21, - 0xc3, 0xd5, 0x68, 0x21, 0xe6, 0x30, 0x74, 0x56, 0x8e, 0x0a, 0x0a, 0x37, 0x76, 0x10, 0x19, 0xa4, - 0xfa, 0x00, 0x2b, 0x8e, 0x6d, 0xfa, 0x8e, 0x6b, 0xda, 0x1d, 0xb4, 0x01, 0x93, 0x9a, 0xc8, 0x14, - 0x26, 0x18, 0xe2, 0xc3, 0xb9, 0x14, 0x33, 0x81, 0x83, 0x47, 0x19, 0x05, 0xbf, 0x70, 0x88, 0x5b, - 0xfd, 0x7c, 0x01, 0x60, 0x95, 0xf8, 0xd7, 0x1d, 0x77, 0x8b, 0x92, 0x7d, 0xc7, 0x47, 0xae, 0xa1, - 0xfb, 0x60, 0xac, 0xe7, 0x18, 0x9e, 0xd8, 0x5d, 0x2c, 0xc8, 0x92, 0x79, 0x97, 0x59, 0x29, 0xaa, - 0x42, 0x89, 0xd9, 0xe0, 0x42, 0x8c, 0xb1, 0x33, 0x89, 0x5a, 0xeb, 0x1e, 0xe6, 0xe5, 0x3c, 0x6d, - 0x05, 0xbb, 0x83, 0xf6, 0xc4, 0x21, 0x2b, 0xd2, 0x56, 0xf0, 0x32, 0x1c, 0x42, 0xd5, 0xd7, 0xc6, - 0x20, 0x96, 0x68, 0x2e, 0x52, 0x94, 0x95, 0xdb, 0xa3, 0x28, 0x7f, 0x0c, 0xe6, 0x2d, 0x47, 0x33, - 0x6a, 0x9a, 0x45, 0xb9, 0xcb, 0x6d, 0x33, 0x89, 0x83, 0x35, 0xbb, 0x13, 0x26, 0x2c, 0xbb, 0x6f, - 0x6f, 0x50, 0x9d, 0xbf, 0x92, 0x51, 0x07, 0x67, 0xb6, 0x46, 0x7e, 0x98, 0xde, 0x8e, 0x3f, 0x52, - 0xbf, 0x32, 0x6a, 0x1a, 0xbe, 0x45, 0x39, 0xfa, 0x20, 0x3c, 0x51, 0xe2, 0xc9, 0xee, 0xd0, 0x2f, - 0x29, 0x70, 0x9a, 0xec, 0xf8, 0xc4, 0xb5, 0x35, 0xeb, 0x9a, 0xab, 0x6d, 0x6c, 0x98, 0xba, 0xb8, - 0xb7, 0xe3, 0x8b, 0xd3, 0xa2, 0x56, 0xf3, 0x72, 0x5a, 0x85, 0x9b, 0x83, 0xea, 0x63, 0xc3, 0x59, - 0x26, 0x83, 0x60, 0x83, 0xd4, 0x26, 0x8c, 0x43, 0xd3, 0xc9, 0x2d, 0x3c, 0x09, 0x95, 0x43, 0x04, - 0x16, 0x94, 0x65, 0xa3, 0xe9, 0xf7, 0xc7, 0x41, 0x0a, 0xf4, 0x38, 0x44, 0x6a, 0x82, 0x3f, 0x56, - 0xe0, 0x94, 0x6e, 0x99, 0xc4, 0xf6, 0x13, 0xf1, 0x2c, 0x7c, 0xb7, 0xac, 0xe5, 0x8a, 0x40, 0xe9, - 0x11, 0xbb, 0xd9, 0xa8, 0x3b, 0xb6, 0x4d, 0x74, 0xbf, 0x9e, 0x82, 0x9c, 0x47, 0x5e, 0xa4, 0x41, - 0x70, 0x6a, 0x67, 0xd8, 0x78, 0x58, 0x79, 0xb3, 0x21, 0x47, 0x23, 0xd6, 0x45, 0x19, 0x0e, 0xa1, - 0xe8, 0x11, 0xa8, 0x74, 0x5c, 0xa7, 0xdf, 0xf3, 0xea, 0xec, 0x4a, 0x91, 0xaf, 0x20, 0xbb, 0xb1, - 0xbd, 0x18, 0x15, 0x63, 0xb9, 0x0e, 0x7a, 0x1c, 0xa6, 0xf8, 0xcf, 0x96, 0x4b, 0x36, 0xcc, 0x1d, - 0xb1, 0xdd, 0xd8, 0x35, 0xcf, 0x45, 0xa9, 0x1c, 0xc7, 0x6a, 0xa1, 0x87, 0xa1, 0x6c, 0x7a, 0x5e, - 0x9f, 0xb8, 0x6b, 0xf8, 0x8a, 0x78, 0xf6, 0xc8, 0x3c, 0x0a, 0xcd, 0xa0, 0x10, 0x47, 0x70, 0xf4, - 0x59, 0x05, 0x66, 0x5c, 0xf2, 0x6a, 0xdf, 0x74, 0x89, 0xc1, 0x88, 0x7a, 0x22, 0xda, 0x06, 0x8f, - 0x16, 0xe1, 0xb3, 0x88, 0x63, 0x48, 0x39, 0x9f, 0x87, 0xc6, 0x42, 0x1c, 0x88, 0x13, 0x3d, 0xa0, - 0x53, 0xe5, 0x99, 0x1d, 0xdb, 0xb4, 0x3b, 0x4b, 0x56, 0xc7, 0x9b, 0x9f, 0x64, 0x5b, 0x97, 0x4d, - 0x55, 0x3b, 0x2a, 0xc6, 0x72, 0x1d, 0xf4, 0x41, 0x98, 0xee, 0x7b, 0x94, 0x73, 0xbb, 0x84, 0xcf, - 0x6f, 0x99, 0x0d, 0x9c, 0x39, 0x71, 0xd6, 0x64, 0x00, 0x8e, 0xd7, 0x43, 0x4f, 0xc1, 0x4c, 0x50, - 0x20, 0x66, 0x19, 0x78, 0xf6, 0x1e, 0xda, 0xcf, 0xb5, 0x18, 0x04, 0x27, 0x6a, 0x2e, 0x2c, 0xc1, - 0xc9, 0x94, 0x61, 0x1e, 0x6a, 0x7b, 0xfc, 0x59, 0x01, 0x1e, 0xb8, 0x25, 0x57, 0xa2, 0x2f, 0x29, - 0x50, 0x21, 0x3b, 0xbe, 0xab, 0x85, 0x51, 0x07, 0x74, 0x89, 0x36, 0x6e, 0xcb, 0x16, 0x58, 0x5c, - 0x8e, 0x08, 0xf1, 0x65, 0x0b, 0x35, 0x35, 0x09, 0x82, 0xe5, 0xfe, 0x50, 0x4d, 0x93, 0x47, 0xf1, - 0xcb, 0xc6, 0xb4, 0x48, 0xa8, 0x25, 0x20, 0x0b, 0xcf, 0xc2, 0x6c, 0x12, 0xf3, 0xa1, 0x66, 0xea, - 0xeb, 0x05, 0x28, 0xb5, 0x2c, 0xcd, 0x3e, 0x8e, 0xd4, 0x8e, 0x3f, 0x15, 0x7b, 0xdb, 0x95, 
0xeb, - 0x15, 0x1d, 0xeb, 0x6a, 0xe6, 0x63, 0xd0, 0x4e, 0xe2, 0x31, 0xe8, 0x73, 0xf9, 0x49, 0xec, 0xff, - 0xf6, 0xf3, 0xdb, 0x0a, 0x94, 0x59, 0xbd, 0x63, 0x78, 0x22, 0xf6, 0x72, 0xfc, 0x89, 0xd8, 0x93, - 0xb9, 0xc7, 0x94, 0xf1, 0x22, 0xec, 0x7b, 0xc1, 0x58, 0xd8, 0x03, 0xb0, 0x8f, 0xcb, 0xd9, 0x05, - 0xf9, 0x60, 0x1e, 0x4a, 0x7b, 0x62, 0x79, 0xc5, 0xd1, 0x35, 0x2b, 0xf9, 0xce, 0x72, 0xff, 0x14, - 0x83, 0x5d, 0x28, 0x13, 0xf1, 0x00, 0x24, 0x18, 0x4c, 0x2e, 0xdd, 0x31, 0x78, 0x45, 0x12, 0x91, - 0x0b, 0x4a, 0x3c, 0x1c, 0x51, 0x50, 0xff, 0xba, 0x00, 0x15, 0x69, 0x2d, 0xdf, 0x8e, 0x97, 0x9c, - 0x17, 0x52, 0x73, 0x6c, 0x15, 0x58, 0x58, 0xc1, 0x99, 0x43, 0xe4, 0xe9, 0x73, 0xa1, 0xa2, 0x47, - 0x39, 0x1e, 0x46, 0x61, 0x6e, 0x29, 0x55, 0x84, 0x88, 0x6b, 0x8a, 0x0a, 0xb0, 0x4c, 0x44, 0xfd, - 0xcb, 0x02, 0x4c, 0xb4, 0x5c, 0x87, 0x2e, 0xf0, 0x31, 0x88, 0x06, 0x2d, 0x26, 0x1a, 0x9e, 0xcb, - 0xa9, 0xb3, 0x53, 0xc4, 0x99, 0xc2, 0xc1, 0x4c, 0x08, 0x87, 0xa5, 0x51, 0x88, 0xec, 0x2f, 0x1e, - 0xfe, 0x5e, 0x81, 0x8a, 0xa8, 0x79, 0x0c, 0x02, 0xe2, 0x93, 0x71, 0x01, 0xf1, 0xf4, 0x08, 0xe3, - 0xca, 0x10, 0x11, 0x5f, 0x50, 0x60, 0x5a, 0xd4, 0x58, 0x21, 0xdd, 0x75, 0xe2, 0xa2, 0x0b, 0x30, - 0xe1, 0xf5, 0xd9, 0x42, 0x8a, 0x01, 0xdd, 0x2b, 0x0b, 0x09, 0x77, 0x5d, 0xd3, 0x59, 0xba, 0x48, - 0x5e, 0x45, 0x7a, 0x7f, 0xcd, 0x0b, 0x70, 0xd0, 0x98, 0xda, 0x75, 0xae, 0x63, 0x0d, 0xbd, 0xe3, - 0xc1, 0x8e, 0x45, 0x30, 0x83, 0x50, 0xcb, 0x89, 0xfe, 0x0d, 0xbc, 0xc5, 0xcc, 0x72, 0xa2, 0x60, - 0x0f, 0xf3, 0x72, 0xf5, 0xf5, 0x62, 0x38, 0xd9, 0x4c, 0x82, 0x5d, 0x82, 0xb2, 0xee, 0x12, 0xcd, - 0x27, 0x46, 0x6d, 0xf7, 0x20, 0x9d, 0x63, 0x5a, 0x5c, 0x3d, 0x68, 0x81, 0xa3, 0xc6, 0x54, 0x61, - 0x92, 0x1d, 0xc0, 0x85, 0x48, 0xb7, 0xcc, 0x74, 0xfe, 0x7e, 0x18, 0x4a, 0xce, 0x75, 0x3b, 0xbc, - 0x5c, 0xdc, 0x97, 0x30, 0x1b, 0xca, 0x55, 0x5a, 0x1b, 0xf3, 0x46, 0xec, 0xfd, 0x9f, 0x78, 0xd5, - 0xc6, 0x15, 0xd9, 0x4a, 0xda, 0x8b, 0x36, 0x64, 0xc1, 0x44, 0x97, 0x2d, 0x03, 0x35, 0x15, 0x8b, - 0x23, 0xb2, 0x32, 0x5f, 0x50, 0x39, 0x85, 0x07, 0xc3, 0x8c, 0x03, 0x12, 0x54, 0xf1, 0xa5, 0xca, - 0x99, 0xd7, 0xd3, 0x74, 0x22, 0x2b, 0xbe, 0xab, 0x41, 0x21, 0x8e, 0xe0, 0xea, 0x1b, 0x11, 0xa7, - 0x08, 0xb1, 0x9b, 0x9e, 0x67, 0x50, 0xc9, 0x95, 0xaf, 0xf4, 0x31, 0x28, 0xf5, 0x36, 0x35, 0x2f, - 0x60, 0x97, 0x20, 0xe7, 0x4b, 0xa9, 0x45, 0x0b, 0x6f, 0x0e, 0xaa, 0x53, 0x82, 0x34, 0xfb, 0x8d, - 0x79, 0x5d, 0xf5, 0xeb, 0x45, 0x98, 0x3c, 0x44, 0x4e, 0xe2, 0xd7, 0x15, 0x40, 0x3a, 0xbf, 0x17, - 0xa5, 0xa7, 0x07, 0x39, 0x72, 0x67, 0x02, 0x13, 0xfc, 0xf5, 0x21, 0x0a, 0x38, 0x85, 0x2a, 0x7a, - 0x43, 0x81, 0x53, 0xa6, 0xbd, 0xe1, 0x6a, 0x9e, 0xef, 0xf6, 0x75, 0xbf, 0xef, 0x06, 0xdd, 0x29, - 0x1e, 0x59, 0x77, 0x98, 0x69, 0xd6, 0x4c, 0xa1, 0x81, 0x53, 0x29, 0x23, 0x02, 0x13, 0xd7, 0x1d, - 0x77, 0x8b, 0x32, 0xdf, 0x58, 0xfe, 0x2c, 0x67, 0x2f, 0x32, 0x14, 0x11, 0xd7, 0xf1, 0xdf, 0x1e, - 0x0e, 0x70, 0xab, 0xbb, 0x90, 0x70, 0xb8, 0xa0, 0x0e, 0x4c, 0xb9, 0xda, 0xf5, 0xf0, 0x31, 0xa3, - 0xd8, 0xda, 0xef, 0xcf, 0x14, 0xa4, 0xe2, 0x91, 0xc1, 0x22, 0x96, 0x1a, 0x45, 0x37, 0x9a, 0x72, - 0x29, 0x8e, 0x21, 0x56, 0xbf, 0xa3, 0x40, 0x89, 0xc7, 0xf6, 0xde, 0x11, 0x2a, 0x31, 0xeb, 0x6a, - 0x66, 0x76, 0x03, 0xaa, 0xa9, 0xb2, 0x1a, 0x77, 0x88, 0xa6, 0xca, 0xfa, 0x9a, 0x71, 0x0c, 0x7d, - 0xa7, 0x28, 0xc6, 0xc2, 0xe4, 0x7c, 0x13, 0x4e, 0x0a, 0x7d, 0xe5, 0x8a, 0xb9, 0x41, 0xe8, 0x52, - 0x37, 0xb4, 0x5d, 0xee, 0x5a, 0x2f, 0xf1, 0x8f, 0x42, 0xd4, 0x87, 0xc1, 0x38, 0xad, 0x0d, 0xfa, - 0x2b, 0x85, 0x4a, 0x54, 0xdf, 0x35, 0xf5, 0x91, 0x12, 0x31, 0x84, 0x7d, 0x5b, 0x5c, 0xe1, 0xc8, - 0xb8, 0xa1, 0xb7, 
0x16, 0x89, 0x56, 0x56, 0x7a, 0x73, 0x50, 0xad, 0xa6, 0x78, 0x91, 0x82, 0xcc, - 0x03, 0x74, 0x62, 0x5f, 0xfb, 0xc1, 0xbe, 0x55, 0xd8, 0xe5, 0x47, 0xd0, 0x63, 0x74, 0x09, 0x4a, - 0x9e, 0xee, 0xf4, 0xc8, 0x7e, 0xc9, 0xc0, 0x93, 0x9a, 0x7a, 0x38, 0xc1, 0x6d, 0xda, 0x12, 0x73, - 0x04, 0x0b, 0xaf, 0xc0, 0x94, 0xdc, 0xf3, 0x14, 0x43, 0xb2, 0x21, 0x1b, 0x92, 0x87, 0xbe, 0x9d, - 0x90, 0x0d, 0xcf, 0xcf, 0x29, 0x20, 0x52, 0xa5, 0x1e, 0xe0, 0xfa, 0xcd, 0x0c, 0xde, 0xa8, 0x8f, - 0x70, 0xf7, 0x96, 0xfc, 0x52, 0x43, 0x34, 0x07, 0xb1, 0x67, 0xea, 0xbf, 0x53, 0x80, 0x69, 0x6e, - 0xbe, 0xd4, 0x4c, 0xdb, 0x30, 0xed, 0xce, 0x31, 0x48, 0x81, 0x58, 0x4a, 0xf7, 0xc2, 0x51, 0xa5, - 0x74, 0xbf, 0x0c, 0xe3, 0xaf, 0x52, 0x8e, 0x0c, 0x52, 0x80, 0x1e, 0x88, 0x31, 0x42, 0x95, 0x96, - 0x31, 0xb3, 0x87, 0x05, 0x0a, 0xf5, 0x9f, 0x15, 0x98, 0x8b, 0x4d, 0xcb, 0x31, 0xc8, 0x93, 0x8d, - 0xb8, 0x3c, 0x59, 0xca, 0xf7, 0x10, 0x4c, 0xea, 0x73, 0x86, 0x5c, 0xf9, 0x5a, 0x01, 0xc6, 0xda, - 0x84, 0x18, 0xc7, 0xb0, 0xd2, 0x2f, 0xc7, 0xe4, 0xfd, 0x87, 0xf3, 0x8d, 0x88, 0x18, 0x99, 0x46, - 0xce, 0x46, 0xc2, 0xc8, 0x79, 0x36, 0x37, 0x85, 0xfd, 0x2d, 0x9c, 0x6f, 0x14, 0x00, 0x68, 0x35, - 0x9e, 0x3f, 0x5b, 0xc4, 0x21, 0x44, 0xdf, 0x62, 0x28, 0xbf, 0xa3, 0xbf, 0xa0, 0xa0, 0x86, 0xa9, - 0x9b, 0x8b, 0x91, 0x5f, 0x2d, 0x9e, 0xb6, 0x39, 0xbe, 0x25, 0xc7, 0x8e, 0x68, 0x4b, 0xaa, 0x17, - 0x80, 0xa5, 0xa0, 0x6a, 0xac, 0xb6, 0xd1, 0xd3, 0x30, 0x6d, 0xf2, 0x4b, 0x8b, 0x86, 0x9c, 0x69, - 0x25, 0x7c, 0x32, 0xd9, 0x94, 0x81, 0x38, 0x5e, 0x57, 0xfd, 0x96, 0x02, 0x2c, 0xa3, 0xf9, 0x31, - 0x6c, 0xc2, 0x9f, 0x8c, 0x6f, 0xc2, 0x0f, 0xe5, 0x65, 0xa8, 0x8c, 0xbd, 0xf7, 0xe5, 0x02, 0xb0, - 0x0c, 0xb4, 0xe2, 0xae, 0x4f, 0xba, 0x29, 0x53, 0x32, 0x6e, 0xca, 0xee, 0x17, 0x17, 0x6d, 0x09, - 0x93, 0x51, 0xba, 0x6c, 0x7b, 0x9f, 0x74, 0x97, 0x56, 0x8c, 0x73, 0xe3, 0xf0, 0x7d, 0x1a, 0xba, - 0x01, 0xd3, 0xde, 0xa6, 0xe3, 0xf8, 0x0d, 0xb2, 0xa1, 0xf5, 0x2d, 0xdf, 0x13, 0xab, 0xbd, 0x94, - 0x3b, 0xfa, 0x24, 0x18, 0x0a, 0x77, 0x93, 0xb7, 0x65, 0xdc, 0x38, 0x4e, 0x0a, 0x2d, 0x02, 0xac, - 0x5b, 0x8e, 0xbe, 0x55, 0x6f, 0x36, 0x30, 0x37, 0xe6, 0x44, 0x26, 0x97, 0x5a, 0x58, 0x8a, 0xa5, - 0x1a, 0x2c, 0x99, 0x38, 0x9d, 0xad, 0x43, 0xd8, 0x33, 0x6f, 0xf7, 0x66, 0x7b, 0x4f, 0x62, 0xb3, - 0x65, 0xe5, 0x49, 0xff, 0x56, 0x89, 0xb3, 0x34, 0xd3, 0xed, 0xd6, 0x61, 0x7c, 0x9d, 0x49, 0x16, - 0xc1, 0xd0, 0xb9, 0xc5, 0x98, 0xf8, 0x50, 0x09, 0xdb, 0xe1, 0xfc, 0x7f, 0x2c, 0x30, 0xa3, 0x97, - 0xa0, 0x68, 0xd8, 0x41, 0x6e, 0xb5, 0xa7, 0xf3, 0x12, 0x68, 0xac, 0xb6, 0xa3, 0x3b, 0xf3, 0xc6, - 0x6a, 0x1b, 0x53, 0xa4, 0xc8, 0x86, 0x49, 0x5b, 0x70, 0x81, 0x10, 0xc4, 0xcf, 0xe7, 0x25, 0x10, - 0x72, 0x53, 0xc8, 0xc3, 0x41, 0x09, 0x0e, 0x69, 0x50, 0x7a, 0xa1, 0xfc, 0x1d, 0x1b, 0x8d, 0xde, - 0x81, 0xbe, 0x81, 0xd3, 0x92, 0xa5, 0xe3, 0x21, 0xbe, 0x17, 0x32, 0x9d, 0xa9, 0xac, 0x10, 0x18, - 0xf7, 0x35, 0xe6, 0x19, 0x1e, 0xcf, 0xef, 0x99, 0xa5, 0xfd, 0xbf, 0x46, 0xb1, 0x44, 0x5c, 0xc6, - 0x7e, 0x7a, 0x58, 0x20, 0xa7, 0x8c, 0xc5, 0xf3, 0x48, 0x8b, 0x48, 0xdf, 0xdc, 0x8c, 0xc5, 0x53, - 0x53, 0x73, 0xc6, 0xe2, 0xff, 0x63, 0x81, 0x59, 0xfd, 0x5f, 0x71, 0x36, 0x0a, 0x07, 0xc8, 0x06, - 0x4c, 0x06, 0xf8, 0x46, 0x89, 0x98, 0x08, 0xd2, 0xe2, 0xf0, 0x9b, 0xd0, 0xe0, 0x17, 0x0e, 0x71, - 0xa3, 0x7a, 0x5a, 0x70, 0x4a, 0xf4, 0x6e, 0xf8, 0x60, 0x81, 0x29, 0x71, 0x27, 0x79, 0xf1, 0x38, - 0x9c, 0xe4, 0x47, 0xf8, 0x41, 0x1b, 0xf5, 0x32, 0x94, 0x43, 0x1e, 0x40, 0x67, 0x25, 0xeb, 0x25, - 0xda, 0xa3, 0x97, 0xc9, 0x2e, 0x37, 0x65, 0xaa, 0xb1, 0x3b, 0x31, 0x7e, 0xd0, 0xbc, 0x40, 0x0b, - 0x84, 0x95, 0xa2, 0xfe, 0xbb, 0xc2, 0xd7, 
0x91, 0x2f, 0x2f, 0xd2, 0xa0, 0xd2, 0xe5, 0x4f, 0xbc, - 0xd8, 0x43, 0xf3, 0x7c, 0x61, 0x78, 0xcc, 0x7b, 0xb8, 0x12, 0xa1, 0xc1, 0x32, 0x4e, 0x74, 0x7d, - 0x38, 0xa5, 0xda, 0x85, 0xd1, 0x18, 0xf4, 0xc0, 0x99, 0xd5, 0xd0, 0x70, 0x1b, 0x39, 0xc9, 0x96, - 0xb2, 0x7f, 0x92, 0xad, 0x5b, 0xe7, 0x63, 0x52, 0xbf, 0xa2, 0x40, 0x6a, 0xee, 0x05, 0xaa, 0x8d, - 0xf1, 0x8b, 0x73, 0x39, 0xd7, 0x1c, 0xbf, 0x55, 0xc7, 0x02, 0x82, 0x5c, 0x38, 0x25, 0xae, 0xa5, - 0x2f, 0x93, 0xdd, 0x28, 0xcb, 0x99, 0x10, 0xde, 0x07, 0xbf, 0xa0, 0x62, 0xaf, 0x64, 0xda, 0x09, - 0x4c, 0x38, 0x15, 0x37, 0xbb, 0x19, 0x65, 0x87, 0xf7, 0x9d, 0xe1, 0x06, 0x62, 0x5d, 0xbd, 0xbd, - 0x37, 0xa3, 0x9c, 0xc4, 0xad, 0x6f, 0x46, 0x59, 0xbd, 0x3b, 0xc4, 0xdf, 0xc4, 0xfa, 0x9a, 0xa1, - 0x9b, 0x7e, 0x9f, 0xda, 0xbc, 0xc9, 0x78, 0xe2, 0x03, 0x78, 0x2b, 0xde, 0x6e, 0x95, 0xeb, 0xdd, - 0xc9, 0xd4, 0x67, 0x95, 0xd4, 0xb7, 0x07, 0x2f, 0xc2, 0x74, 0x4c, 0x5f, 0x0d, 0x43, 0xd8, 0x94, - 0xd4, 0x10, 0x36, 0x39, 0x42, 0xad, 0xb0, 0x6f, 0x84, 0xda, 0xff, 0x94, 0x05, 0x0f, 0x30, 0x5d, - 0xee, 0x65, 0x18, 0x67, 0x71, 0x64, 0x41, 0x06, 0xc5, 0xa7, 0x72, 0xc7, 0xa7, 0x79, 0x5c, 0x36, - 0xf0, 0xff, 0xb1, 0xc0, 0x8a, 0x1a, 0x30, 0xab, 0x4b, 0x9f, 0x6a, 0x91, 0x3e, 0xfc, 0x14, 0x66, - 0x7e, 0xa8, 0x27, 0xe0, 0x78, 0xa8, 0x05, 0xc2, 0x5c, 0x1b, 0xe4, 0xbb, 0xe3, 0x83, 0x39, 0x73, - 0x20, 0xf2, 0x9c, 0x6a, 0xa1, 0x16, 0xf8, 0x2a, 0x00, 0x09, 0xbc, 0xca, 0x81, 0xb7, 0xfc, 0x99, - 0x7c, 0x61, 0xe3, 0x81, 0xff, 0x3a, 0x14, 0x23, 0x61, 0x91, 0x87, 0x25, 0x22, 0xc8, 0x85, 0xca, - 0x66, 0x94, 0x19, 0x4e, 0xa8, 0x66, 0xcf, 0x8d, 0x98, 0x95, 0x8e, 0x9f, 0x5a, 0x52, 0x01, 0x96, - 0x89, 0x20, 0x37, 0xf6, 0x39, 0x80, 0x11, 0x32, 0x19, 0x45, 0x1a, 0xca, 0x2d, 0xbf, 0x72, 0xe3, - 0x02, 0xd8, 0x61, 0x74, 0xe8, 0x28, 0xba, 0x5c, 0x14, 0x63, 0x1a, 0xd1, 0x8c, 0xca, 0xb0, 0x44, - 0x85, 0xce, 0x6d, 0x37, 0x8a, 0xb2, 0x15, 0x79, 0x17, 0x9e, 0x1b, 0x31, 0x26, 0x59, 0x68, 0x04, - 0x51, 0x01, 0x96, 0x89, 0x20, 0x1b, 0xa0, 0x1b, 0x06, 0xdf, 0x8a, 0xec, 0x0a, 0xb9, 0xc6, 0x19, - 0x85, 0xf0, 0x72, 0x03, 0x33, 0xfa, 0x8d, 0x25, 0x0a, 0xe8, 0x15, 0xc9, 0x90, 0x80, 0xfc, 0xca, - 0xea, 0x81, 0x8c, 0x88, 0x27, 0x22, 0xf5, 0xa2, 0xc2, 0xf6, 0xeb, 0xbd, 0x92, 0x6a, 0x71, 0x73, - 0x50, 0x9d, 0x62, 0x32, 0x64, 0x48, 0xd5, 0x88, 0x0c, 0xca, 0xa9, 0xfd, 0x0c, 0x4a, 0x74, 0x11, - 0xe6, 0x3c, 0xd9, 0xf9, 0xc7, 0x04, 0xc3, 0x34, 0x6b, 0x12, 0x06, 0x6b, 0xb7, 0x93, 0x15, 0xf0, - 0x70, 0x9b, 0xd8, 0x17, 0xe5, 0x66, 0xf6, 0xfd, 0xa2, 0xdc, 0x7f, 0x4f, 0x40, 0x45, 0x3a, 0x24, - 0xdf, 0x8e, 0x90, 0x13, 0x1f, 0x2a, 0xba, 0x63, 0x7b, 0xbe, 0xab, 0x49, 0x61, 0x36, 0x23, 0xd2, - 0x0c, 0x23, 0xd5, 0xea, 0x11, 0x66, 0x2c, 0x93, 0xa1, 0x6c, 0x13, 0xda, 0x38, 0xc5, 0x23, 0xb0, - 0x71, 0x42, 0xb6, 0x49, 0xb1, 0x73, 0x1e, 0x07, 0x08, 0xa4, 0x0f, 0xe1, 0xc9, 0x72, 0x26, 0xa3, - 0x6b, 0xbd, 0xa6, 0x77, 0x29, 0x84, 0x61, 0xa9, 0xde, 0xf0, 0x67, 0xfd, 0x4a, 0xc7, 0xf7, 0x59, - 0xbf, 0x57, 0x01, 0xc2, 0xef, 0xee, 0x8d, 0x64, 0xdf, 0x46, 0xdf, 0xf5, 0x0b, 0xd9, 0x20, 0x2c, - 0xf2, 0xb0, 0x44, 0x24, 0xc3, 0xa8, 0x9a, 0xc8, 0x75, 0xeb, 0xde, 0x87, 0x93, 0x2e, 0xf1, 0xdd, - 0xdd, 0xfa, 0xae, 0xce, 0x12, 0x4e, 0xba, 0x3e, 0x7b, 0xd5, 0x36, 0x79, 0xe8, 0x57, 0x6d, 0xec, - 0x52, 0x0e, 0x0f, 0xa3, 0xc2, 0x69, 0xf8, 0x63, 0xdb, 0xae, 0xbc, 0xdf, 0xb6, 0x43, 0x4f, 0x40, - 0xc5, 0x27, 0xfa, 0xa6, 0x6d, 0xea, 0x9a, 0xd5, 0x6c, 0x88, 0x48, 0xd3, 0x90, 0x69, 0xaf, 0x45, - 0x20, 0x2c, 0xd7, 0x43, 0x35, 0x28, 0xf6, 0x4d, 0x43, 0xc8, 0x9e, 0x0f, 0x84, 0x29, 0x86, 0x9b, - 0x8d, 0x9b, 0x83, 0xea, 0x03, 0x59, 0xdf, 0xcb, 0xf7, 0x77, 0x7b, 
0xc4, 0x5b, 0x5c, 0x6b, 0x36, - 0x30, 0x6d, 0xac, 0xfe, 0x9e, 0x02, 0xc2, 0xfc, 0xa7, 0xda, 0x93, 0xa4, 0x17, 0x4e, 0x26, 0x74, - 0xc2, 0xfb, 0x62, 0xc9, 0xc2, 0x27, 0x87, 0x3e, 0x26, 0xc8, 0xdf, 0xfc, 0x14, 0xe3, 0x9f, 0xb8, - 0xe4, 0x98, 0xa5, 0x27, 0x3c, 0x0f, 0x43, 0x99, 0xd8, 0xba, 0xbb, 0xdb, 0x8b, 0x58, 0x7f, 0x9a, - 0x07, 0xc0, 0x89, 0x42, 0x1c, 0xc1, 0xd5, 0xeb, 0x00, 0xd1, 0x07, 0xb4, 0x0e, 0xf6, 0xf6, 0xe8, - 0xd6, 0x69, 0x75, 0xa3, 0xd7, 0x4f, 0xc5, 0xcc, 0xd7, 0x4f, 0x7f, 0x30, 0x05, 0xe3, 0xfc, 0x42, - 0x1f, 0xbd, 0xa6, 0x40, 0x45, 0xb3, 0x6d, 0xc7, 0xd7, 0x64, 0x19, 0x78, 0x39, 0x7f, 0xc8, 0xc0, - 0xe2, 0x52, 0x84, 0x2d, 0x11, 0x47, 0x2b, 0x41, 0xb0, 0x4c, 0x34, 0x16, 0x1d, 0x5f, 0xd8, 0x37, - 0x3a, 0x1e, 0x43, 0x51, 0x77, 0xcd, 0x51, 0xb4, 0xc0, 0x3a, 0x6e, 0x72, 0x2d, 0xb0, 0x8e, 0x9b, - 0x98, 0x22, 0x43, 0x7e, 0x4c, 0x3d, 0x1a, 0xcb, 0x9f, 0xd9, 0x8c, 0x4f, 0x80, 0xa4, 0x24, 0xcd, - 0xec, 0xa3, 0x20, 0xd9, 0x30, 0x6e, 0x69, 0xeb, 0xc4, 0x0a, 0x42, 0x84, 0x2e, 0x8c, 0x30, 0xe5, - 0xec, 0xa3, 0x71, 0x5e, 0xe2, 0x51, 0x05, 0x2f, 0xc4, 0x82, 0x4a, 0xc8, 0x39, 0xe3, 0x99, 0x9c, - 0xb3, 0x01, 0x13, 0x62, 0x2b, 0x09, 0x7d, 0xed, 0xe9, 0x11, 0x9e, 0xe9, 0x49, 0xf1, 0x4a, 0xbc, - 0x00, 0x07, 0xc8, 0xd9, 0xd7, 0x89, 0xb4, 0x1d, 0xb3, 0xdb, 0xef, 0x32, 0x11, 0x55, 0x92, 0xab, - 0xb2, 0x62, 0x1c, 0xc0, 0x59, 0x55, 0xee, 0x7e, 0x61, 0x12, 0x46, 0xae, 0x2a, 0x92, 0xf9, 0x04, - 0x70, 0xf4, 0x12, 0x4c, 0x76, 0xb5, 0x9d, 0x76, 0xdf, 0xed, 0x10, 0xa1, 0x18, 0x65, 0xfb, 0x00, - 0xfa, 0xbe, 0x69, 0x2d, 0xd2, 0x73, 0xd1, 0x77, 0x17, 0x9b, 0xb6, 0x7f, 0xd5, 0x6d, 0xfb, 0x6e, - 0xf8, 0xd6, 0x69, 0x45, 0x60, 0xc1, 0x21, 0x3e, 0x64, 0xc1, 0x4c, 0x57, 0xdb, 0x91, 0x3e, 0xcd, - 0xcf, 0x64, 0x52, 0x1e, 0x0a, 0xcc, 0x3a, 0x5c, 0x89, 0xe1, 0xc2, 0x09, 0xdc, 0x29, 0xd6, 0xe9, - 0xd4, 0xb1, 0x5a, 0xa7, 0x4b, 0xa1, 0xa7, 0x77, 0x9a, 0x71, 0xe6, 0x3d, 0x69, 0xde, 0x9b, 0xfd, - 0xbd, 0xb8, 0x2f, 0x87, 0x5e, 0xdc, 0x99, 0xfc, 0x26, 0x65, 0xb6, 0x07, 0x17, 0xbd, 0x0a, 0x15, - 0x43, 0xf3, 0x35, 0x5e, 0xea, 0xcd, 0x9f, 0xc8, 0x1f, 0xe7, 0x24, 0x88, 0x44, 0xaf, 0xc6, 0x23, - 0xb4, 0x58, 0xa6, 0x81, 0xae, 0xf2, 0xd4, 0x20, 0x16, 0xf1, 0xa3, 0x2a, 0xec, 0xe8, 0x9b, 0xe5, - 0x1a, 0x6b, 0x90, 0xc9, 0x63, 0xa8, 0x02, 0x4e, 0x6f, 0x87, 0xaa, 0x41, 0xc8, 0xc4, 0x5c, 0x14, - 0x37, 0x29, 0x07, 0x3a, 0x2c, 0x3c, 0x0b, 0xb3, 0x49, 0x59, 0x7a, 0x98, 0x97, 0x03, 0x0b, 0x4f, - 0x42, 0x45, 0x12, 0x0c, 0x87, 0x7a, 0x74, 0xf0, 0x33, 0x30, 0x9b, 0x14, 0x66, 0x72, 0x12, 0x0e, - 0xe5, 0xb6, 0x26, 0xe1, 0xa8, 0xbd, 0xff, 0xcd, 0x1f, 0x9e, 0xbb, 0xeb, 0xbb, 0x3f, 0x3c, 0x77, - 0xd7, 0x5b, 0x3f, 0x3c, 0x77, 0xd7, 0xa7, 0xf7, 0xce, 0x29, 0x6f, 0xee, 0x9d, 0x53, 0xbe, 0xbb, - 0x77, 0x4e, 0x79, 0x6b, 0xef, 0x9c, 0xf2, 0xaf, 0x7b, 0xe7, 0x94, 0x37, 0xfe, 0xed, 0xdc, 0x5d, - 0x2f, 0x4d, 0x08, 0x94, 0xff, 0x1f, 0x00, 0x00, 0xff, 0xff, 0xf3, 0x69, 0x16, 0xac, 0xff, 0x88, - 0x00, 0x00, + // 8871 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x7d, 0x6d, 0x8c, 0x24, 0xc7, + 0x75, 0x18, 0x7b, 0xf6, 0x6b, 0xf6, 0xcd, 0x7e, 0xd6, 0x7d, 0x70, 0xb9, 0x24, 0x6f, 0xa8, 0x96, + 0x28, 0x90, 0x91, 0xb4, 0x67, 0x52, 0x92, 0x29, 0x92, 0xa2, 0xc8, 0x9d, 0x99, 0xbd, 0xbb, 0xd1, + 0xdd, 0xee, 0x0d, 0x6b, 0x6e, 0x8f, 0x12, 0x9d, 0x30, 0xea, 0xed, 0xae, 0x9d, 0x6d, 0x6e, 0x4f, + 0xf7, 0xb0, 0xbb, 0x67, 0x6f, 0xf7, 0xe4, 0x40, 0x8a, 0x10, 0xd9, 0xf9, 0x70, 0x1c, 0xc3, 0xb1, + 0x22, 0xc4, 0x4a, 0x60, 0x19, 0x41, 0x62, 0x04, 0x0e, 0x64, 0x07, 0x89, 0x8c, 0x04, 0x49, 0x00, + 0xc3, 0x46, 0x20, 0xd9, 0x48, 
0x00, 0x21, 0x88, 0x10, 0x05, 0x89, 0x57, 0xd1, 0x06, 0x10, 0x12, + 0x24, 0xff, 0x8c, 0xfc, 0x39, 0x18, 0x88, 0x51, 0x1f, 0x5d, 0x5d, 0xdd, 0xd3, 0xbd, 0x3b, 0xdb, + 0xb3, 0x7b, 0xe4, 0xfd, 0xda, 0x9d, 0x7a, 0x55, 0xef, 0x55, 0x57, 0xbd, 0x7a, 0xf5, 0xde, 0xab, + 0x57, 0xaf, 0xa0, 0xd6, 0xb1, 0xc3, 0x9d, 0xfe, 0xd6, 0x8a, 0xe9, 0x75, 0xaf, 0x76, 0x0c, 0xdf, + 0x22, 0x2e, 0xf1, 0xe3, 0x7f, 0x7a, 0xbb, 0x9d, 0xab, 0x46, 0xcf, 0x0e, 0xae, 0x9a, 0x9e, 0x4f, + 0xae, 0xee, 0xbd, 0xb0, 0x45, 0x42, 0xe3, 0x85, 0xab, 0x1d, 0x0a, 0x33, 0x42, 0x62, 0xad, 0xf4, + 0x7c, 0x2f, 0xf4, 0xd0, 0x8b, 0x31, 0x8e, 0x95, 0xa8, 0x69, 0xfc, 0x4f, 0x6f, 0xb7, 0xb3, 0x42, + 0x71, 0xac, 0x50, 0x1c, 0x2b, 0x02, 0xc7, 0xf2, 0x27, 0x54, 0xba, 0x5e, 0xc7, 0xbb, 0xca, 0x50, + 0x6d, 0xf5, 0xb7, 0xd9, 0x2f, 0xf6, 0x83, 0xfd, 0xc7, 0x49, 0x2c, 0x3f, 0xbf, 0xfb, 0x99, 0x60, + 0xc5, 0xf6, 0x68, 0x67, 0xae, 0x1a, 0xfd, 0xd0, 0x0b, 0x4c, 0xc3, 0xb1, 0xdd, 0xce, 0xd5, 0xbd, + 0x81, 0xde, 0x2c, 0xeb, 0x4a, 0x55, 0xd1, 0xed, 0x63, 0xeb, 0xf8, 0x5b, 0x86, 0x99, 0x55, 0xe7, + 0x53, 0x71, 0x9d, 0xae, 0x61, 0xee, 0xd8, 0x2e, 0xf1, 0x0f, 0xa2, 0x01, 0xb9, 0xea, 0x93, 0xc0, + 0xeb, 0xfb, 0x26, 0x39, 0x55, 0xab, 0xe0, 0x6a, 0x97, 0x84, 0x46, 0x16, 0xad, 0xab, 0x79, 0xad, + 0xfc, 0xbe, 0x1b, 0xda, 0xdd, 0x41, 0x32, 0x3f, 0x7b, 0x52, 0x83, 0xc0, 0xdc, 0x21, 0x5d, 0x63, + 0xa0, 0xdd, 0x27, 0xf3, 0xda, 0xf5, 0x43, 0xdb, 0xb9, 0x6a, 0xbb, 0x61, 0x10, 0xfa, 0xe9, 0x46, + 0xfa, 0x8b, 0x30, 0xb1, 0x6a, 0x59, 0x9e, 0x8b, 0x9e, 0x87, 0x29, 0xe2, 0x1a, 0x5b, 0x0e, 0xb1, + 0x96, 0xb4, 0x67, 0xb4, 0xe7, 0xca, 0xb5, 0xf9, 0xef, 0x1f, 0x56, 0x1f, 0x3b, 0x3a, 0xac, 0x4e, + 0xad, 0xf1, 0x62, 0x1c, 0xc1, 0xf5, 0x6f, 0x94, 0x60, 0x92, 0x35, 0x0a, 0xd0, 0xaf, 0x6a, 0x70, + 0x61, 0xb7, 0xbf, 0x45, 0x7c, 0x97, 0x84, 0x24, 0x68, 0x18, 0xc1, 0xce, 0x96, 0x67, 0xf8, 0x1c, + 0x45, 0xe5, 0xc5, 0xeb, 0x2b, 0xa7, 0xe7, 0x9e, 0x95, 0x9b, 0x83, 0xe8, 0x6a, 0x8f, 0x1f, 0x1d, + 0x56, 0x2f, 0x64, 0x00, 0x70, 0x16, 0x71, 0xb4, 0x07, 0x33, 0x6e, 0xc7, 0x76, 0xf7, 0x9b, 0x6e, + 0xc7, 0x27, 0x41, 0xb0, 0x54, 0x62, 0x9d, 0x79, 0xa3, 0x48, 0x67, 0x36, 0x14, 0x3c, 0xb5, 0x85, + 0xa3, 0xc3, 0xea, 0x8c, 0x5a, 0x82, 0x13, 0x74, 0xf4, 0x5f, 0xd0, 0x60, 0x7e, 0xd5, 0xea, 0xda, + 0x41, 0x60, 0x7b, 0x6e, 0xcb, 0xe9, 0x77, 0x6c, 0x17, 0x3d, 0x03, 0xe3, 0xae, 0xd1, 0x25, 0x6c, + 0x40, 0xa6, 0x6b, 0x33, 0x62, 0x4c, 0xc7, 0x37, 0x8c, 0x2e, 0xc1, 0x0c, 0x82, 0xde, 0x84, 0x49, + 0xd3, 0x73, 0xb7, 0xed, 0x8e, 0xe8, 0xe7, 0x27, 0x56, 0xf8, 0x3c, 0xae, 0xa8, 0xf3, 0xc8, 0xba, + 0x27, 0xe6, 0x7f, 0x05, 0x1b, 0xf7, 0xd6, 0xf6, 0x43, 0xe2, 0x52, 0x32, 0x35, 0x38, 0x3a, 0xac, + 0x4e, 0xd6, 0x19, 0x02, 0x2c, 0x10, 0xe9, 0xd7, 0xa0, 0xbc, 0xea, 0x10, 0x3f, 0xb4, 0xdd, 0x0e, + 0x7a, 0x05, 0xe6, 0x48, 0xd7, 0xb0, 0x1d, 0x4c, 0x4c, 0x62, 0xef, 0x11, 0x3f, 0x58, 0xd2, 0x9e, + 0x19, 0x7b, 0x6e, 0xba, 0x86, 0x8e, 0x0e, 0xab, 0x73, 0x6b, 0x09, 0x08, 0x4e, 0xd5, 0xd4, 0xff, + 0xaa, 0x06, 0x95, 0xd5, 0xbe, 0x65, 0x87, 0x1c, 0x3f, 0xf2, 0xa1, 0x62, 0xd0, 0x9f, 0x2d, 0xcf, + 0xb1, 0xcd, 0x03, 0x31, 0xc9, 0xaf, 0x17, 0x19, 0xd7, 0xd5, 0x18, 0x4d, 0x6d, 0xfe, 0xe8, 0xb0, + 0x5a, 0x51, 0x0a, 0xb0, 0x4a, 0x44, 0xdf, 0x01, 0x15, 0x86, 0xbe, 0x08, 0x33, 0xfc, 0x23, 0xd7, + 0x8d, 0x1e, 0x26, 0xdb, 0xa2, 0x0f, 0x1f, 0x56, 0xc6, 0x2c, 0x22, 0xb4, 0x72, 0x7b, 0xeb, 0x5d, + 0x62, 0x86, 0x98, 0x6c, 0x13, 0x9f, 0xb8, 0x26, 0xe1, 0xd3, 0x57, 0x57, 0x1a, 0xe3, 0x04, 0x2a, + 0xfd, 0xc7, 0x1a, 0x2c, 0xac, 0xee, 0x19, 0xb6, 0x63, 0x6c, 0xd9, 0x8e, 0x1d, 0x1e, 0xbc, 0xed, + 0xb9, 0x64, 0x88, 0xf9, 0xdb, 0x84, 0xc7, 0xfb, 0xae, 
0xc1, 0xdb, 0x39, 0x64, 0x9d, 0xcf, 0xd8, + 0x9d, 0x83, 0x1e, 0xa1, 0x8c, 0x47, 0x47, 0xfa, 0xc9, 0xa3, 0xc3, 0xea, 0xe3, 0x9b, 0xd9, 0x55, + 0x70, 0x5e, 0x5b, 0x84, 0xe1, 0xb2, 0x02, 0xba, 0xeb, 0x39, 0xfd, 0xae, 0xc0, 0x3a, 0xc6, 0xb0, + 0x2e, 0x1f, 0x1d, 0x56, 0x2f, 0x6f, 0x66, 0xd6, 0xc0, 0x39, 0x2d, 0xf5, 0xef, 0x97, 0x60, 0xa6, + 0x66, 0x98, 0xbb, 0xfd, 0x5e, 0xad, 0x6f, 0xee, 0x92, 0x10, 0x7d, 0x09, 0xca, 0x54, 0x6c, 0x59, + 0x46, 0x68, 0x88, 0x91, 0xfc, 0x99, 0x5c, 0xee, 0x63, 0x93, 0x48, 0x6b, 0xc7, 0x63, 0xbb, 0x4e, + 0x42, 0xa3, 0x86, 0xc4, 0x98, 0x40, 0x5c, 0x86, 0x25, 0x56, 0xb4, 0x0d, 0xe3, 0x41, 0x8f, 0x98, + 0x82, 0xb7, 0x1b, 0x45, 0x78, 0x45, 0xed, 0x71, 0xbb, 0x47, 0xcc, 0x78, 0x16, 0xe8, 0x2f, 0xcc, + 0xf0, 0x23, 0x17, 0x26, 0x83, 0xd0, 0x08, 0xfb, 0x74, 0x78, 0x28, 0xa5, 0x6b, 0x23, 0x53, 0x62, + 0xd8, 0x6a, 0x73, 0x82, 0xd6, 0x24, 0xff, 0x8d, 0x05, 0x15, 0xfd, 0xbf, 0x68, 0xb0, 0xa0, 0x56, + 0xbf, 0x65, 0x07, 0x21, 0xfa, 0x8b, 0x03, 0xc3, 0xb9, 0x32, 0xdc, 0x70, 0xd2, 0xd6, 0x6c, 0x30, + 0x17, 0x04, 0xb9, 0x72, 0x54, 0xa2, 0x0c, 0x25, 0x81, 0x09, 0x3b, 0x24, 0x5d, 0xce, 0x56, 0x05, + 0xe5, 0x99, 0xda, 0xe5, 0xda, 0xac, 0x20, 0x36, 0xd1, 0xa4, 0x68, 0x31, 0xc7, 0xae, 0x7f, 0x09, + 0x2e, 0xaa, 0xb5, 0x5a, 0xbe, 0xb7, 0x67, 0x5b, 0xc4, 0xa7, 0x2b, 0x21, 0x3c, 0xe8, 0x0d, 0xac, + 0x04, 0xca, 0x59, 0x98, 0x41, 0xd0, 0x47, 0x61, 0xd2, 0x27, 0x1d, 0xdb, 0x73, 0xd9, 0x6c, 0x4f, + 0xc7, 0x63, 0x87, 0x59, 0x29, 0x16, 0x50, 0xfd, 0xff, 0x95, 0x92, 0x63, 0x47, 0xa7, 0x11, 0xed, + 0x41, 0xb9, 0x27, 0x48, 0x89, 0xb1, 0xbb, 0x31, 0xea, 0x07, 0x46, 0x5d, 0x8f, 0x47, 0x35, 0x2a, + 0xc1, 0x92, 0x16, 0xb2, 0x61, 0x2e, 0xfa, 0xbf, 0x3e, 0x82, 0x18, 0x66, 0xe2, 0xb4, 0x95, 0x40, + 0x84, 0x53, 0x88, 0xd1, 0x1d, 0x98, 0x0e, 0x88, 0xe9, 0x13, 0x2a, 0x93, 0x04, 0x9b, 0x66, 0x0a, + 0xae, 0x76, 0x54, 0x49, 0x08, 0xae, 0x45, 0xd1, 0xfd, 0x69, 0x09, 0xc0, 0x31, 0x22, 0xf4, 0x1c, + 0x94, 0x03, 0x42, 0x2c, 0x2a, 0x91, 0x96, 0xc6, 0xf9, 0xdc, 0xd0, 0x4f, 0x6d, 0x8b, 0x32, 0x2c, + 0xa1, 0xfa, 0xb7, 0xc7, 0x01, 0x0d, 0xb2, 0xb8, 0x3a, 0x02, 0xbc, 0x44, 0x8c, 0xff, 0x28, 0x23, + 0x20, 0x56, 0x4b, 0x0a, 0x31, 0xba, 0x0f, 0xb3, 0x8e, 0x11, 0x84, 0xb7, 0x7b, 0x54, 0x07, 0x89, + 0x18, 0xa5, 0xf2, 0xe2, 0x6a, 0x91, 0x99, 0xbe, 0xa5, 0x22, 0xaa, 0x2d, 0x1e, 0x1d, 0x56, 0x67, + 0x13, 0x45, 0x38, 0x49, 0x0a, 0xbd, 0x0b, 0xd3, 0xb4, 0x60, 0xcd, 0xf7, 0x3d, 0x5f, 0x8c, 0xfe, + 0x6b, 0x45, 0xe9, 0x32, 0x24, 0xb5, 0x59, 0x3a, 0x27, 0xf2, 0x27, 0x8e, 0xd1, 0xa3, 0xcf, 0x03, + 0xf2, 0xb6, 0x02, 0xe2, 0xef, 0x11, 0xeb, 0x3a, 0x57, 0xb8, 0xe8, 0xc7, 0xd2, 0xd9, 0x19, 0xab, + 0x2d, 0x8b, 0xd9, 0x44, 0xb7, 0x07, 0x6a, 0xe0, 0x8c, 0x56, 0x68, 0x17, 0x90, 0x54, 0xda, 0x24, + 0x03, 0x2c, 0x4d, 0x0c, 0xcf, 0x3e, 0x97, 0x29, 0xb1, 0xeb, 0x03, 0x28, 0x70, 0x06, 0x5a, 0xfd, + 0xdf, 0x97, 0xa0, 0xc2, 0x59, 0x64, 0xcd, 0x0d, 0xfd, 0x83, 0x87, 0xb0, 0x41, 0x90, 0xc4, 0x06, + 0x51, 0x2f, 0xbe, 0xe6, 0x59, 0x87, 0x73, 0xf7, 0x87, 0x6e, 0x6a, 0x7f, 0x58, 0x1b, 0x95, 0xd0, + 0xf1, 0xdb, 0xc3, 0x0f, 0x35, 0x98, 0x57, 0x6a, 0x3f, 0x84, 0xdd, 0xc1, 0x4a, 0xee, 0x0e, 0xaf, + 0x8f, 0xf8, 0x7d, 0x39, 0x9b, 0x83, 0x97, 0xf8, 0x2c, 0x26, 0xb8, 0x5f, 0x04, 0xd8, 0x62, 0xe2, + 0x64, 0x23, 0xd6, 0x93, 0xe4, 0x94, 0xd7, 0x24, 0x04, 0x2b, 0xb5, 0x12, 0x32, 0xab, 0x74, 0xac, + 0xcc, 0xfa, 0x5e, 0x09, 0x16, 0x07, 0x86, 0x7d, 0x50, 0x8e, 0x68, 0xef, 0x93, 0x1c, 0x29, 0xbd, + 0x1f, 0x72, 0x64, 0xac, 0x88, 0x1c, 0xd1, 0xff, 0x40, 0x83, 0xb1, 0x3a, 0x6e, 0xa2, 0x8f, 0x25, + 0x34, 0xda, 0xc7, 0x55, 0x8d, 0xf6, 0xc1, 0x61, 0x75, 0xaa, 0x8e, 0x9b, 0x8a, 
0x72, 0xfb, 0xb7, + 0x35, 0x58, 0x34, 0x3d, 0x37, 0x34, 0x28, 0x6b, 0x62, 0x2e, 0xf6, 0x23, 0x16, 0x2b, 0xa4, 0xcc, + 0xd5, 0x53, 0xc8, 0x6a, 0x4f, 0x88, 0x0e, 0x2c, 0xa6, 0x21, 0x01, 0x1e, 0xa4, 0xac, 0x6f, 0xc2, + 0x74, 0xdd, 0xf1, 0xfa, 0x56, 0xd3, 0xdd, 0xf6, 0xce, 0x50, 0x23, 0xf9, 0x91, 0x06, 0x33, 0x0c, + 0x6f, 0xcb, 0xf7, 0xb6, 0x6d, 0x87, 0x3c, 0x1a, 0x8a, 0xb1, 0xda, 0xe3, 0x3c, 0xc1, 0xc7, 0x14, + 0x55, 0xb5, 0xe2, 0x23, 0xa2, 0xa8, 0xaa, 0x5d, 0xce, 0x91, 0x45, 0xdf, 0x98, 0x4a, 0x7e, 0x19, + 0x93, 0x46, 0xcf, 0x41, 0xd9, 0x34, 0x6a, 0x7d, 0xd7, 0x72, 0x24, 0x5f, 0xd0, 0x5e, 0xd6, 0x57, + 0x79, 0x19, 0x96, 0x50, 0x74, 0x1f, 0x20, 0x76, 0x1e, 0x88, 0x69, 0xb8, 0x36, 0x9a, 0xc3, 0xa2, + 0x4d, 0x42, 0x6a, 0x73, 0x07, 0xf1, 0xd4, 0xc7, 0x30, 0xac, 0x50, 0x43, 0x7f, 0x05, 0x66, 0xc5, + 0x20, 0x37, 0xbb, 0x46, 0x47, 0xd8, 0x74, 0x05, 0x47, 0x6a, 0x5d, 0x41, 0x54, 0xbb, 0x24, 0x08, + 0xcf, 0xaa, 0xa5, 0x01, 0x4e, 0x52, 0x43, 0x07, 0x30, 0xd3, 0x55, 0xed, 0xd4, 0xf1, 0xe2, 0x5b, + 0x86, 0x62, 0xb3, 0xd6, 0x2e, 0x0a, 0xe2, 0x33, 0x09, 0x0b, 0x37, 0x41, 0x2a, 0x43, 0xdd, 0x9e, + 0x38, 0x2f, 0x75, 0x9b, 0xc0, 0x14, 0x5f, 0xde, 0xc1, 0xd2, 0x24, 0xfb, 0xc0, 0x57, 0x8a, 0x7c, + 0x20, 0x97, 0x14, 0xb1, 0x37, 0x8c, 0xff, 0x0e, 0x70, 0x84, 0x1b, 0xed, 0xc1, 0x0c, 0xdd, 0xad, + 0xda, 0xc4, 0x21, 0x66, 0xe8, 0xf9, 0x4b, 0x53, 0xc5, 0xbd, 0x4d, 0x6d, 0x05, 0x0f, 0x77, 0x57, + 0xa8, 0x25, 0x38, 0x41, 0x47, 0x4a, 0xbf, 0x72, 0xae, 0xf4, 0xeb, 0x43, 0x65, 0x4f, 0xf1, 0x1b, + 0x4c, 0xb3, 0x41, 0xf8, 0x5c, 0x91, 0x8e, 0xc5, 0x4e, 0x84, 0xda, 0x05, 0x41, 0xa8, 0xa2, 0x3a, + 0x1c, 0x54, 0x3a, 0xfa, 0x2f, 0x4e, 0xc2, 0x62, 0xdd, 0xe9, 0x07, 0x21, 0xf1, 0x57, 0x85, 0x3b, + 0x97, 0xf8, 0xe8, 0x6b, 0x1a, 0x5c, 0x66, 0xff, 0x36, 0xbc, 0x7b, 0x6e, 0x83, 0x38, 0xc6, 0xc1, + 0xea, 0x36, 0xad, 0x61, 0x59, 0xa7, 0x93, 0x40, 0x8d, 0xbe, 0xd8, 0xa9, 0x99, 0x03, 0xa4, 0x9d, + 0x89, 0x11, 0xe7, 0x50, 0x42, 0x7f, 0x4b, 0x83, 0x27, 0x32, 0x40, 0x0d, 0xe2, 0x90, 0x90, 0x08, + 0x19, 0x70, 0xda, 0x7e, 0x3c, 0x7d, 0x74, 0x58, 0x7d, 0xa2, 0x9d, 0x87, 0x14, 0xe7, 0xd3, 0x43, + 0xbf, 0xac, 0xc1, 0x72, 0x06, 0xf4, 0x9a, 0x61, 0x3b, 0x7d, 0x9f, 0x08, 0x45, 0xf5, 0xb4, 0xdd, + 0xb9, 0x72, 0x74, 0x58, 0x5d, 0x6e, 0xe7, 0x62, 0xc5, 0xc7, 0x50, 0x44, 0x5f, 0x81, 0x4b, 0x12, + 0xba, 0xe9, 0xba, 0x84, 0x58, 0xc4, 0xba, 0x63, 0x0b, 0xbb, 0xf2, 0xf4, 0x5d, 0x79, 0xe2, 0xe8, + 0xb0, 0x7a, 0xa9, 0x9d, 0x85, 0x10, 0x67, 0xd3, 0x41, 0x1d, 0x78, 0x3a, 0x06, 0x84, 0xb6, 0x63, + 0xdf, 0x67, 0x98, 0xee, 0xec, 0xf8, 0x24, 0xd8, 0xf1, 0x1c, 0x8b, 0x09, 0x0b, 0xad, 0xf6, 0xa1, + 0xa3, 0xc3, 0xea, 0xd3, 0xed, 0xe3, 0x2a, 0xe2, 0xe3, 0xf1, 0x20, 0x0b, 0x66, 0x02, 0xd3, 0x70, + 0x9b, 0x6e, 0x48, 0xfc, 0x3d, 0xc3, 0x59, 0x9a, 0x2c, 0xf4, 0x81, 0x7c, 0x89, 0x2a, 0x78, 0x70, + 0x02, 0xab, 0xfe, 0xbf, 0x35, 0xa8, 0x88, 0x95, 0xc0, 0x14, 0x96, 0x2d, 0x98, 0x30, 0xe9, 0x86, + 0x25, 0x38, 0xfe, 0xb5, 0xc2, 0x1b, 0x23, 0xc5, 0x16, 0xef, 0x8a, 0xac, 0x08, 0x73, 0xd4, 0x68, + 0x2f, 0x63, 0x5b, 0xab, 0x8d, 0xb6, 0xad, 0x31, 0x6a, 0x27, 0x6c, 0x69, 0xfa, 0xe1, 0x18, 0x4c, + 0xd7, 0x3d, 0xd7, 0xb2, 0x99, 0x92, 0xfc, 0x42, 0x42, 0x35, 0x7b, 0x5a, 0x15, 0x4e, 0x0f, 0x0e, + 0xab, 0xb3, 0xb2, 0xa2, 0x22, 0xad, 0x5e, 0x96, 0x16, 0x1a, 0xd7, 0xd5, 0x3e, 0x94, 0x34, 0xad, + 0x1e, 0x1c, 0x56, 0xe7, 0x65, 0xb3, 0xa4, 0xb5, 0x85, 0xf6, 0x00, 0x51, 0x9d, 0xf9, 0x8e, 0x6f, + 0xb8, 0x01, 0x47, 0x4b, 0x99, 0x96, 0xaf, 0x9f, 0xbf, 0x30, 0xdc, 0x9c, 0xd2, 0x16, 0xb1, 0x4a, + 0x7d, 0x6b, 0x00, 0x1b, 0xce, 0xa0, 0x80, 0xde, 0x85, 0x39, 0x5a, 0xba, 0xd9, 0xb3, 0x8c, 0x90, + 0x28, 
0x0b, 0xe5, 0x34, 0x34, 0x2f, 0x0b, 0x9a, 0x73, 0xb7, 0x12, 0x98, 0x70, 0x0a, 0x33, 0x57, + 0x65, 0x8d, 0xc0, 0x73, 0xd9, 0x1a, 0x48, 0xa8, 0xb2, 0xb4, 0x14, 0x0b, 0x28, 0x7a, 0x1e, 0xa6, + 0xba, 0x24, 0x08, 0x8c, 0x0e, 0x61, 0x4c, 0x3d, 0x1d, 0xef, 0x5c, 0xeb, 0xbc, 0x18, 0x47, 0x70, + 0xf4, 0x71, 0x98, 0x30, 0x3d, 0x8b, 0x04, 0x4b, 0x53, 0xcc, 0xa3, 0x7c, 0x99, 0xf1, 0x12, 0x2d, + 0x78, 0x70, 0x58, 0x9d, 0x66, 0x06, 0x08, 0xfd, 0x85, 0x79, 0x25, 0xfd, 0x37, 0xa8, 0x22, 0x99, + 0x52, 0xc8, 0x87, 0x50, 0xc1, 0x1f, 0x9e, 0x7f, 0x4d, 0xff, 0x41, 0x09, 0x2e, 0xd2, 0x1e, 0xfa, + 0x9e, 0xe3, 0x50, 0x21, 0xdb, 0x73, 0xbc, 0x83, 0x2e, 0x71, 0xc3, 0x0f, 0x54, 0x2f, 0xd1, 0x1b, + 0x30, 0xd9, 0xe3, 0xe7, 0x27, 0x63, 0xac, 0x3b, 0xcf, 0xd1, 0x49, 0xe4, 0xa7, 0x1b, 0x0f, 0x0e, + 0xab, 0xcb, 0x59, 0x1f, 0x20, 0xce, 0x45, 0x44, 0x3b, 0x64, 0xa7, 0x34, 0x0e, 0xce, 0x74, 0x9f, + 0x1c, 0x52, 0x83, 0x37, 0xb6, 0x88, 0x33, 0xac, 0x92, 0xa1, 0xff, 0xb4, 0x04, 0x97, 0xe3, 0x1e, + 0x35, 0xdd, 0x20, 0x34, 0x1c, 0x87, 0xdb, 0xc1, 0xe7, 0x6f, 0x22, 0xf5, 0x12, 0x26, 0xd2, 0x46, + 0x51, 0x73, 0x73, 0xb0, 0xef, 0xb9, 0x5e, 0xa2, 0xfd, 0x94, 0x97, 0xa8, 0x75, 0x86, 0x34, 0x8f, + 0x77, 0x18, 0xfd, 0x1f, 0x0d, 0x96, 0xb3, 0x1b, 0x3e, 0x04, 0x83, 0xcd, 0x4b, 0x1a, 0x6c, 0x9f, + 0x3f, 0xbb, 0xaf, 0xce, 0x31, 0xdd, 0xfe, 0x73, 0xee, 0xd7, 0x32, 0x23, 0x6e, 0x1b, 0xe6, 0xa9, + 0x76, 0x1d, 0x84, 0xc2, 0x9d, 0x71, 0xba, 0x73, 0xbe, 0xc8, 0xa5, 0x31, 0x8f, 0x93, 0x38, 0x70, + 0x1a, 0x29, 0xda, 0x80, 0x29, 0xca, 0xed, 0x14, 0x7f, 0x69, 0x78, 0xfc, 0x52, 0xa0, 0xb6, 0x79, + 0x5b, 0x1c, 0x21, 0xd1, 0xff, 0x4c, 0x83, 0xa7, 0x8e, 0x9b, 0x7d, 0xf4, 0x1e, 0x80, 0x19, 0xed, + 0x61, 0xfc, 0x20, 0xb6, 0xa8, 0x16, 0x10, 0x61, 0x89, 0x97, 0x90, 0x2c, 0x0a, 0xb0, 0x42, 0x24, + 0xc3, 0xbb, 0x5f, 0x3a, 0x27, 0xef, 0xbe, 0xfe, 0x7f, 0x35, 0x55, 0x58, 0xa8, 0xa3, 0xff, 0xa8, + 0x09, 0x0b, 0xb5, 0xef, 0xb9, 0x9e, 0x95, 0xe4, 0x92, 0x55, 0x9b, 0x3c, 0x72, 0x4b, 0x56, 0xed, + 0x7c, 0xce, 0x92, 0xfd, 0x3b, 0xa5, 0xbc, 0xaf, 0x65, 0x4b, 0xf6, 0x1e, 0x4c, 0x47, 0x71, 0x33, + 0x11, 0x63, 0x5f, 0x1b, 0xb5, 0x4f, 0x1c, 0x5d, 0x7c, 0xfc, 0x15, 0x95, 0x04, 0x38, 0xa6, 0x85, + 0xf6, 0x01, 0x2c, 0xb9, 0x51, 0x8a, 0xd9, 0xbf, 0x31, 0x1a, 0xe5, 0x78, 0xe3, 0xad, 0xcd, 0x51, + 0x7e, 0x8b, 0x7f, 0x63, 0x85, 0x96, 0xfe, 0xbb, 0x25, 0x40, 0x83, 0xdd, 0xa5, 0xca, 0xc6, 0xae, + 0xed, 0x5a, 0x69, 0x65, 0xe3, 0xa6, 0xed, 0x5a, 0x98, 0x41, 0xa4, 0x3a, 0x52, 0xca, 0x55, 0x47, + 0x5e, 0x83, 0xf9, 0x8e, 0xe3, 0x6d, 0x19, 0x8e, 0x73, 0x20, 0xa2, 0x6f, 0xd8, 0x86, 0x54, 0xae, + 0x5d, 0xa0, 0x72, 0xed, 0x7a, 0x12, 0x84, 0xd3, 0x75, 0x51, 0x0f, 0x16, 0x7c, 0x62, 0x7a, 0xae, + 0x69, 0x3b, 0x4c, 0x79, 0xf4, 0xfa, 0x61, 0x41, 0x13, 0xee, 0xe2, 0xd1, 0x61, 0x75, 0x01, 0xa7, + 0x70, 0xe1, 0x01, 0xec, 0xe8, 0x59, 0x98, 0xea, 0xf9, 0x76, 0xd7, 0xf0, 0x0f, 0x98, 0x7a, 0x5a, + 0xae, 0x55, 0xa8, 0x80, 0x6c, 0xf1, 0x22, 0x1c, 0xc1, 0xf4, 0x6f, 0x69, 0x30, 0xd6, 0xd8, 0x68, + 0x23, 0x1d, 0x26, 0x2d, 0xaf, 0x6b, 0xd8, 0xae, 0x18, 0x25, 0x16, 0xc4, 0xd2, 0x60, 0x25, 0x58, + 0x40, 0x50, 0x0f, 0xa6, 0x23, 0xf9, 0x32, 0xd2, 0xa1, 0x46, 0x63, 0xa3, 0x2d, 0x0f, 0x82, 0x25, + 0x2b, 0x45, 0x25, 0x01, 0x8e, 0x89, 0xe8, 0x06, 0x2c, 0x36, 0x36, 0xda, 0x4d, 0xd7, 0x74, 0xfa, + 0x16, 0x59, 0xdb, 0x67, 0x7f, 0xe8, 0x97, 0xd9, 0xbc, 0x44, 0x04, 0xce, 0xb0, 0x2f, 0x13, 0x95, + 0x70, 0x04, 0xa3, 0xd5, 0x08, 0x6f, 0x21, 0xa2, 0x3e, 0x58, 0x35, 0x81, 0x04, 0x47, 0x30, 0xfd, + 0x47, 0x25, 0xa8, 0x28, 0x1d, 0x42, 0x0e, 0x4c, 0xf1, 0xcf, 0x8d, 0x0e, 0x5d, 0xd7, 0x0a, 0x7e, + 0x62, 0xb2, 0xd7, 0x9c, 0x3a, 
0x1f, 0xd0, 0x00, 0x47, 0x24, 0xd4, 0x59, 0x2a, 0xe5, 0xcf, 0x12, + 0x5a, 0x01, 0xe0, 0xc7, 0xcb, 0xec, 0x7c, 0x86, 0x6b, 0xa9, 0x6c, 0x21, 0xb4, 0x65, 0x29, 0x56, + 0x6a, 0xa0, 0xa7, 0x04, 0x3f, 0xf3, 0xd3, 0xe7, 0x72, 0x8a, 0x97, 0xb7, 0x61, 0xe2, 0xbe, 0xe7, + 0x92, 0x40, 0x38, 0xfa, 0xce, 0xe8, 0x03, 0xa7, 0xa9, 0x80, 0x7a, 0x9b, 0xe2, 0xc5, 0x1c, 0xbd, + 0xfe, 0x9b, 0x1a, 0x40, 0xc3, 0x08, 0x0d, 0xee, 0x97, 0x1a, 0x22, 0x70, 0xe7, 0xa9, 0xc4, 0x32, + 0x2c, 0x0f, 0x1c, 0x1d, 0x8c, 0x07, 0xf6, 0xfd, 0xe8, 0xf3, 0xe5, 0xde, 0xc3, 0xb1, 0xb7, 0xed, + 0xfb, 0x04, 0x33, 0x38, 0xfa, 0x18, 0x4c, 0x13, 0xd7, 0xf4, 0x0f, 0x7a, 0x21, 0xb1, 0xd8, 0x08, + 0x94, 0xf9, 0x79, 0xce, 0x5a, 0x54, 0x88, 0x63, 0xb8, 0xbe, 0x07, 0xe5, 0x35, 0xd7, 0xea, 0x79, + 0x36, 0x37, 0x4a, 0x4e, 0xe8, 0xe0, 0xd3, 0x30, 0xd6, 0xf7, 0x1d, 0xd1, 0xbf, 0x8a, 0xa8, 0x30, + 0xb6, 0x89, 0x6f, 0x61, 0x5a, 0x4e, 0x2d, 0xbd, 0x5e, 0xdf, 0xef, 0x79, 0x41, 0xd4, 0x49, 0xa9, + 0x98, 0xb4, 0x78, 0x31, 0x8e, 0xe0, 0xfa, 0x03, 0x0d, 0x16, 0xd6, 0xf6, 0x7b, 0xb6, 0xcf, 0x22, + 0x82, 0x88, 0x4f, 0xb7, 0x74, 0xda, 0x7e, 0x8f, 0xff, 0x2b, 0xfa, 0x20, 0xdb, 0x8b, 0x1a, 0x38, + 0x82, 0xa3, 0x6d, 0x98, 0x23, 0xac, 0x39, 0x15, 0x0a, 0x0d, 0x43, 0xfa, 0xca, 0x4e, 0x63, 0xe8, + 0xf2, 0x80, 0xb3, 0x04, 0x16, 0x9c, 0xc2, 0x8a, 0xda, 0x30, 0x67, 0x3a, 0x46, 0x10, 0xd8, 0xdb, + 0xb6, 0x19, 0x9f, 0x75, 0x4d, 0xd7, 0x3e, 0x46, 0xdb, 0xd6, 0x13, 0x90, 0x07, 0x87, 0xd5, 0x4b, + 0xa2, 0x9f, 0x49, 0x00, 0x4e, 0xa1, 0xd0, 0xff, 0x40, 0x83, 0x69, 0xa9, 0xc8, 0x7c, 0xb0, 0x6c, + 0xc1, 0xe7, 0xa0, 0x6c, 0xd9, 0x81, 0x2a, 0xe0, 0xd9, 0x69, 0x45, 0x43, 0x94, 0x61, 0x09, 0xd5, + 0x03, 0x28, 0x5f, 0x17, 0x6b, 0x04, 0x2d, 0x43, 0xc9, 0x8e, 0xf6, 0x17, 0x10, 0x1f, 0x50, 0x6a, + 0x36, 0x70, 0xc9, 0xb6, 0x24, 0x57, 0x95, 0x72, 0xb9, 0x4a, 0x99, 0xf6, 0xb1, 0xe3, 0xa7, 0x5d, + 0xff, 0x8e, 0x06, 0x95, 0x1b, 0xf6, 0x16, 0xf1, 0x5d, 0xae, 0xc5, 0x3d, 0x9b, 0x8e, 0x11, 0xad, + 0x64, 0xc5, 0x87, 0xa2, 0x7d, 0x98, 0x0e, 0xcc, 0x1d, 0x62, 0xf5, 0x1d, 0x79, 0x56, 0x58, 0x28, + 0x12, 0x54, 0x21, 0xdd, 0x16, 0xf8, 0x94, 0x58, 0x98, 0x88, 0x02, 0x8e, 0x89, 0xe9, 0x5f, 0x86, + 0x0b, 0x19, 0x8d, 0x50, 0x15, 0x26, 0x82, 0xd0, 0xf0, 0x43, 0x31, 0x66, 0x4c, 0x76, 0xb4, 0x69, + 0x01, 0xe6, 0xe5, 0xe8, 0x09, 0x18, 0x23, 0xae, 0x25, 0x06, 0x6d, 0x8a, 0xae, 0xb2, 0x35, 0xd7, + 0xc2, 0xb4, 0x8c, 0x4e, 0x91, 0xe3, 0x25, 0x98, 0x91, 0x4d, 0xd1, 0x2d, 0x51, 0x86, 0x25, 0x54, + 0xff, 0xa5, 0x49, 0x78, 0xfa, 0x86, 0xe7, 0xdb, 0xf7, 0x3d, 0x37, 0x34, 0x9c, 0x96, 0x67, 0xc5, + 0xde, 0x6f, 0x31, 0xdd, 0x5f, 0xd7, 0xe0, 0x71, 0xb3, 0xd7, 0x6f, 0xba, 0x76, 0x68, 0x1b, 0x91, + 0x53, 0xb2, 0x45, 0x7c, 0xdb, 0x2b, 0xea, 0x04, 0x67, 0xb1, 0x85, 0xf5, 0xd6, 0x66, 0x16, 0x4a, + 0x9c, 0x47, 0x0b, 0xbd, 0x0b, 0x73, 0x96, 0x77, 0xcf, 0xe5, 0x2e, 0x52, 0xe2, 0x18, 0x07, 0x05, + 0x5d, 0xdf, 0x8c, 0xc5, 0x1b, 0x09, 0x4c, 0x38, 0x85, 0x99, 0xf9, 0xfd, 0x65, 0x51, 0x3b, 0x64, + 0xb1, 0x95, 0xf7, 0xe3, 0xe1, 0x2c, 0xe8, 0xf7, 0x6f, 0x64, 0x62, 0xc4, 0x39, 0x94, 0xd0, 0x57, + 0xe0, 0x92, 0xcd, 0x07, 0x02, 0x13, 0xc3, 0xb2, 0x5d, 0x12, 0x04, 0xfc, 0xbb, 0x47, 0x70, 0x6c, + 0x37, 0xb3, 0x10, 0xe2, 0x6c, 0x3a, 0xe8, 0x1d, 0x80, 0xe0, 0xc0, 0x35, 0xc5, 0x5c, 0x4f, 0x14, + 0xa2, 0xca, 0xb7, 0x60, 0x89, 0x05, 0x2b, 0x18, 0xe9, 0x2e, 0x14, 0x7a, 0x0e, 0xf1, 0x0d, 0xd7, + 0xe4, 0x7e, 0x3f, 0x8d, 0xef, 0x42, 0x77, 0xa2, 0x42, 0x1c, 0xc3, 0x91, 0x05, 0x33, 0xfd, 0x9e, + 0x32, 0xf9, 0x53, 0xc5, 0x9d, 0xdf, 0x9b, 0x0a, 0x1e, 0x9c, 0xc0, 0xaa, 0xff, 0x33, 0x0d, 0xa6, + 0x44, 0x64, 0x34, 0xfa, 0x68, 0x4a, 0xdf, 0x93, 0x5e, 
0x90, 0x94, 0xce, 0x77, 0xc0, 0xec, 0x63, + 0xa1, 0x51, 0x0b, 0xa6, 0x2c, 0xa4, 0x30, 0x08, 0xc2, 0xb1, 0x7a, 0x9e, 0xb0, 0x93, 0x23, 0x95, + 0x5d, 0x21, 0xa6, 0x7f, 0x5b, 0x83, 0xc5, 0x81, 0x56, 0x43, 0x28, 0xf3, 0x0f, 0xd1, 0xbf, 0xf9, + 0x6b, 0x00, 0x2c, 0x08, 0x7e, 0xb5, 0xd5, 0x6c, 0x13, 0x7f, 0x4f, 0x8a, 0x95, 0xbf, 0xa9, 0xc1, + 0x42, 0xec, 0x89, 0x17, 0xbd, 0xd0, 0x8a, 0xc7, 0x15, 0xdc, 0x4c, 0xe1, 0xaa, 0x2d, 0x89, 0xef, + 0x5e, 0x48, 0x43, 0xf0, 0x00, 0x5d, 0xf4, 0x37, 0x34, 0x58, 0x30, 0x92, 0x41, 0xf0, 0xd1, 0x26, + 0x50, 0x28, 0xb8, 0x2b, 0x15, 0x50, 0x1f, 0xf7, 0x25, 0x05, 0x08, 0xf0, 0x00, 0x59, 0xf4, 0x29, + 0x98, 0x31, 0x7a, 0xf6, 0x6a, 0xdf, 0xb2, 0x89, 0x6b, 0xca, 0xc8, 0x69, 0xc6, 0xb8, 0xab, 0xad, + 0xa6, 0x2c, 0xc7, 0x89, 0x5a, 0x32, 0xca, 0x5d, 0x0c, 0xe4, 0xf8, 0x88, 0x51, 0xee, 0x62, 0x0c, + 0xe3, 0x28, 0x77, 0x31, 0x74, 0x2a, 0x11, 0xf4, 0x73, 0xf0, 0x04, 0xdf, 0x3d, 0x6b, 0x46, 0x60, + 0x9b, 0xab, 0xfd, 0x70, 0x87, 0xb8, 0x61, 0xa4, 0x03, 0x71, 0x8b, 0x8a, 0x9d, 0x33, 0xae, 0xe5, + 0x55, 0xc2, 0xf9, 0xed, 0x91, 0x0b, 0xe0, 0xd9, 0x96, 0x29, 0xbe, 0x87, 0x1f, 0x75, 0x15, 0x3a, + 0x06, 0xbe, 0xdd, 0x6c, 0xd4, 0xc5, 0xe7, 0x30, 0x61, 0x14, 0xff, 0xc6, 0x0a, 0x05, 0xf4, 0x4d, + 0x0d, 0x66, 0x05, 0x93, 0x0b, 0x9a, 0x53, 0x6c, 0xfe, 0xdf, 0x2e, 0xca, 0x8c, 0x29, 0x86, 0x5f, + 0xc1, 0x2a, 0x72, 0x1e, 0xae, 0x26, 0x03, 0x1f, 0x12, 0x30, 0x9c, 0xec, 0x07, 0xfa, 0x7b, 0x1a, + 0x5c, 0x0c, 0x88, 0xbf, 0x67, 0x9b, 0x64, 0xd5, 0x34, 0xbd, 0xbe, 0x1b, 0x4d, 0x72, 0xb9, 0xb8, + 0xdf, 0xa0, 0x9d, 0x81, 0xaf, 0xb6, 0x74, 0x74, 0x58, 0xbd, 0x98, 0x05, 0xc1, 0x99, 0xf4, 0xe9, + 0x2e, 0x39, 0x7f, 0xcf, 0x08, 0xcd, 0x9d, 0xba, 0x61, 0xee, 0x30, 0xf3, 0x22, 0x58, 0x9a, 0x2e, + 0x1e, 0x11, 0xf9, 0x56, 0x12, 0x15, 0x77, 0x1b, 0xa4, 0x0a, 0x71, 0x9a, 0x20, 0x0a, 0xa0, 0xec, + 0x93, 0xf7, 0xfa, 0x24, 0x08, 0x83, 0x25, 0x60, 0xc4, 0x9b, 0x23, 0xcf, 0x18, 0x16, 0x08, 0xb9, + 0xd6, 0x14, 0xfd, 0xc2, 0x92, 0xd0, 0xf2, 0x1b, 0x80, 0x06, 0xa7, 0x13, 0x2d, 0xc0, 0xd8, 0x2e, + 0xe1, 0x37, 0x4c, 0xa6, 0x31, 0xfd, 0x17, 0x5d, 0x84, 0x89, 0x3d, 0xc3, 0xe9, 0x73, 0xcd, 0xb6, + 0x8c, 0xf9, 0x8f, 0x57, 0x4a, 0x9f, 0xd1, 0xf4, 0xdf, 0xd3, 0xe0, 0x52, 0x26, 0x4d, 0x84, 0xe1, + 0x72, 0xd7, 0xd8, 0xdf, 0xf0, 0xdc, 0xf5, 0x7e, 0x68, 0x84, 0xb6, 0xdb, 0x69, 0xba, 0xdb, 0x8e, + 0xdd, 0xd9, 0xe1, 0x8a, 0xe0, 0x04, 0x57, 0x25, 0xd6, 0x33, 0x6b, 0xe0, 0x9c, 0x96, 0xa8, 0x09, + 0x17, 0xba, 0xc6, 0xfe, 0x00, 0xc2, 0x12, 0x43, 0xc8, 0xee, 0x29, 0xad, 0x0f, 0x82, 0x71, 0x56, + 0x1b, 0xfd, 0x1b, 0xe3, 0xf0, 0x24, 0xed, 0x78, 0xbc, 0xdf, 0xac, 0x1b, 0xae, 0xd1, 0xf9, 0x60, + 0xca, 0xf5, 0xef, 0x68, 0xf0, 0xf8, 0x4e, 0xb6, 0x76, 0x2b, 0x76, 0xbc, 0x37, 0x0b, 0xe9, 0xf8, + 0xc7, 0x29, 0xcc, 0xfc, 0xa0, 0xff, 0xd8, 0x2a, 0x38, 0xaf, 0x53, 0xe8, 0x0d, 0x58, 0x70, 0x3d, + 0x8b, 0xd4, 0x9b, 0x0d, 0xbc, 0x6e, 0x04, 0xbb, 0xed, 0xc8, 0x98, 0x9f, 0xe0, 0x4e, 0xad, 0x8d, + 0x14, 0x0c, 0x0f, 0xd4, 0x46, 0x7b, 0x80, 0x7a, 0x9e, 0xb5, 0xb6, 0x67, 0x9b, 0xd1, 0x89, 0x6f, + 0x71, 0x47, 0x1a, 0x0b, 0xc2, 0x6e, 0x0d, 0x60, 0xc3, 0x19, 0x14, 0xf4, 0x3f, 0xd2, 0x60, 0x9e, + 0xce, 0x48, 0xcb, 0xf7, 0xf6, 0x0f, 0x3e, 0x88, 0xbc, 0xf0, 0x3c, 0x8c, 0x77, 0x3d, 0x2b, 0x32, + 0x32, 0x2f, 0x51, 0x8d, 0x68, 0xdd, 0xb3, 0xc8, 0x03, 0xee, 0x58, 0xdb, 0x3f, 0xa0, 0x3f, 0x30, + 0xab, 0xa2, 0xff, 0x37, 0x8d, 0xeb, 0x2c, 0x91, 0x2d, 0xf6, 0x81, 0xe4, 0xed, 0x97, 0x60, 0x96, + 0x96, 0xad, 0x1b, 0xfb, 0xad, 0xc6, 0x5d, 0xcf, 0x89, 0x22, 0x10, 0x58, 0x2c, 0xf0, 0x4d, 0x15, + 0x80, 0x93, 0xf5, 0xf4, 0x6f, 0xce, 0x02, 0xab, 0xe0, 0x90, 0xf0, 0x83, 0xf8, 
0x5d, 0x2f, 0x40, + 0xc5, 0xec, 0xf5, 0xeb, 0xd7, 0xda, 0x6f, 0xf6, 0xbd, 0xd0, 0x10, 0x3e, 0x3f, 0xa6, 0x88, 0xd4, + 0x5b, 0x9b, 0x51, 0x31, 0x56, 0xeb, 0xd0, 0x55, 0x63, 0xf6, 0xfa, 0x42, 0x0e, 0xb5, 0xd4, 0x73, + 0x6a, 0xb6, 0x6a, 0xea, 0xad, 0xcd, 0x04, 0x0c, 0x0f, 0xd4, 0x46, 0x5f, 0x81, 0x19, 0x22, 0x18, + 0xfa, 0x86, 0xe1, 0x5b, 0x62, 0xbd, 0x14, 0xde, 0x49, 0xe4, 0xd0, 0x46, 0xab, 0x84, 0xeb, 0x6f, + 0x6b, 0x0a, 0x09, 0x9c, 0x20, 0xc8, 0x74, 0x29, 0xf1, 0x9b, 0xce, 0x94, 0x67, 0x5d, 0xf7, 0x0d, + 0x93, 0x28, 0xa6, 0xd7, 0x84, 0xd0, 0xa5, 0xf2, 0x2a, 0xe1, 0xfc, 0xf6, 0xe8, 0xb7, 0x35, 0xb8, + 0x2c, 0xa1, 0xb6, 0x6b, 0x77, 0xfb, 0x5d, 0x4c, 0x4c, 0xc7, 0xb0, 0xbb, 0x42, 0xb1, 0x7a, 0xeb, + 0xcc, 0x3e, 0x34, 0x89, 0x9e, 0x6f, 0x56, 0xd9, 0x30, 0x9c, 0xd3, 0x25, 0xf4, 0x6d, 0x0d, 0x9e, + 0x89, 0x40, 0x2d, 0x6a, 0xda, 0xf4, 0x7d, 0x12, 0xc7, 0xb0, 0x88, 0x21, 0x29, 0x66, 0xfe, 0x7d, + 0xe4, 0xe8, 0xb0, 0xfa, 0xcc, 0xda, 0x09, 0xb8, 0xf1, 0x89, 0xd4, 0x55, 0x76, 0x69, 0x7b, 0xdb, + 0xa1, 0xd0, 0xc4, 0xce, 0x8b, 0x5d, 0x28, 0x09, 0x9c, 0x20, 0x88, 0x7e, 0x47, 0x83, 0xc7, 0xd5, + 0x02, 0x95, 0x5b, 0xb8, 0x0a, 0xf6, 0x85, 0x33, 0xeb, 0x4c, 0x0a, 0x3f, 0x77, 0xdf, 0xe4, 0x00, + 0x71, 0x5e, 0xaf, 0xd0, 0xb3, 0x30, 0xd5, 0x65, 0x8c, 0xc9, 0xd5, 0xb4, 0x09, 0xee, 0x86, 0xe3, + 0xbc, 0x1a, 0xe0, 0x08, 0x46, 0xad, 0x9f, 0x9e, 0x67, 0xb5, 0x6c, 0x2b, 0xb8, 0x65, 0x77, 0xed, + 0x70, 0xa9, 0xc2, 0xae, 0x0d, 0xb0, 0xe1, 0x68, 0x79, 0x56, 0xab, 0xd9, 0xe0, 0xe5, 0x38, 0x51, + 0x8b, 0x85, 0x48, 0xda, 0x5d, 0xa3, 0x43, 0x5a, 0x7d, 0xc7, 0x69, 0xf9, 0x1e, 0xb3, 0x88, 0x1b, + 0xc4, 0xb0, 0x1c, 0xdb, 0x25, 0x4b, 0x33, 0xc5, 0x43, 0x24, 0x9b, 0x79, 0x48, 0x71, 0x3e, 0x3d, + 0xb4, 0x02, 0xb0, 0x6d, 0xd8, 0x4e, 0xfb, 0x9e, 0xd1, 0xbb, 0xed, 0x2e, 0xcd, 0x32, 0x01, 0xc6, + 0x4c, 0x8f, 0x6b, 0xb2, 0x14, 0x2b, 0x35, 0x28, 0x37, 0x51, 0x29, 0x88, 0x09, 0xbf, 0xfe, 0xb0, + 0x34, 0x77, 0x46, 0xdc, 0x14, 0x21, 0xe4, 0xc3, 0x77, 0x53, 0x21, 0x81, 0x13, 0x04, 0xd1, 0xd7, + 0x35, 0x98, 0x0b, 0x0e, 0x82, 0x90, 0x74, 0x65, 0x1f, 0xe6, 0xcf, 0xba, 0x0f, 0xcc, 0x57, 0xd0, + 0x4e, 0x10, 0xc1, 0x29, 0xa2, 0xfa, 0x61, 0x89, 0x2b, 0xc5, 0x03, 0x2c, 0x88, 0x5e, 0x83, 0xf9, + 0x2e, 0xe9, 0x7a, 0xfe, 0xc1, 0x6a, 0x74, 0x45, 0x58, 0x78, 0x37, 0x98, 0x91, 0xb0, 0x9e, 0x04, + 0xe1, 0x74, 0x5d, 0xba, 0x41, 0xb0, 0xe9, 0xba, 0xd6, 0x8e, 0xdb, 0x97, 0xe2, 0x0d, 0xa2, 0x99, + 0x82, 0xe1, 0x81, 0xda, 0xa8, 0x0e, 0x8b, 0xa2, 0xac, 0x49, 0x55, 0xae, 0xe0, 0x9a, 0x4f, 0xa2, + 0x13, 0x0c, 0xaa, 0x4a, 0x2c, 0x36, 0xd3, 0x40, 0x3c, 0x58, 0x9f, 0x7e, 0x05, 0xfd, 0xa1, 0xf6, + 0x62, 0x3c, 0xfe, 0x8a, 0x8d, 0x24, 0x08, 0xa7, 0xeb, 0x46, 0xca, 0x61, 0xa2, 0x0b, 0x13, 0xf1, + 0x57, 0x6c, 0xa4, 0x60, 0x78, 0xa0, 0xb6, 0xfe, 0xdf, 0xc7, 0xe1, 0xc3, 0x43, 0x88, 0x6d, 0xd4, + 0xcd, 0x1e, 0xee, 0x13, 0x16, 0xd1, 0x4a, 0x74, 0xc0, 0xbd, 0xf2, 0x66, 0xdf, 0x70, 0x43, 0x3b, + 0x3c, 0x18, 0x72, 0x7a, 0x7a, 0x39, 0xd3, 0x73, 0x7a, 0x7a, 0xc3, 0x4e, 0x67, 0x90, 0x37, 0x9d, + 0xa7, 0x27, 0x39, 0xfc, 0xf4, 0x77, 0xb3, 0xa7, 0xbf, 0xe0, 0xa8, 0x9e, 0xc8, 0x2e, 0xbd, 0x1c, + 0x76, 0x29, 0x38, 0xaa, 0x43, 0xb0, 0xd7, 0x9f, 0x8c, 0xc3, 0x47, 0x86, 0xd9, 0x42, 0x0a, 0xf2, + 0x57, 0x86, 0x90, 0x3e, 0x57, 0xfe, 0xca, 0x0b, 0x2d, 0x38, 0x47, 0xfe, 0xca, 0x20, 0x79, 0xde, + 0xfc, 0x95, 0x37, 0xaa, 0xe7, 0xc5, 0x5f, 0x79, 0xa3, 0x3a, 0x04, 0x7f, 0xfd, 0x69, 0x7a, 0x7f, + 0x90, 0x3b, 0x58, 0x13, 0xc6, 0xcc, 0x5e, 0xbf, 0xa0, 0x90, 0x62, 0x67, 0x67, 0xf5, 0xd6, 0x26, + 0xa6, 0x38, 0x10, 0x86, 0x49, 0xce, 0x3f, 0x05, 0x45, 0x10, 0x0b, 0x0b, 0xe1, 0x2c, 0x89, 0x05, + 0x26, 
0x3a, 0x54, 0xa4, 0xb7, 0x43, 0xba, 0xc4, 0x37, 0x9c, 0x76, 0xe8, 0xf9, 0x46, 0xa7, 0xa8, + 0xb4, 0x61, 0x43, 0xb5, 0x96, 0xc2, 0x85, 0x07, 0xb0, 0xd3, 0x01, 0xe9, 0xd9, 0x56, 0x41, 0xf9, + 0xc2, 0x06, 0xa4, 0xd5, 0x6c, 0x60, 0x8a, 0x43, 0xff, 0xc3, 0x32, 0x28, 0xf1, 0xf3, 0xd4, 0x52, + 0x31, 0x1c, 0xc7, 0xbb, 0xd7, 0xf2, 0xed, 0x3d, 0xdb, 0x21, 0x1d, 0x62, 0xc9, 0x00, 0xeb, 0x40, + 0x9c, 0xb0, 0x32, 0xd5, 0x69, 0x35, 0xaf, 0x12, 0xce, 0x6f, 0x4f, 0x2d, 0xd1, 0x45, 0x33, 0x7d, + 0x0d, 0x67, 0x94, 0x33, 0x95, 0x81, 0x3b, 0x3d, 0x7c, 0x3d, 0x0d, 0x14, 0xe3, 0x41, 0xb2, 0xe8, + 0xab, 0x1a, 0x37, 0xb1, 0xa5, 0x8f, 0x4e, 0xcc, 0xd9, 0xf5, 0x33, 0x72, 0x09, 0xc7, 0xb6, 0x7a, + 0xec, 0x05, 0x4c, 0x12, 0xa4, 0xb6, 0xd0, 0xa5, 0xdd, 0x2c, 0x6f, 0x9b, 0x98, 0xd9, 0xdb, 0x45, + 0xbb, 0x92, 0xe3, 0xbe, 0xe3, 0xa7, 0x84, 0x99, 0x15, 0x70, 0x76, 0x47, 0xe4, 0x28, 0x49, 0x67, + 0x89, 0x10, 0x02, 0x85, 0x47, 0x29, 0xe5, 0x75, 0x89, 0x47, 0x49, 0x02, 0x70, 0x92, 0x20, 0xea, + 0xc1, 0xf4, 0x6e, 0xe4, 0x7a, 0x12, 0x16, 0x6d, 0xbd, 0x28, 0x75, 0xc5, 0x7f, 0xc5, 0x4f, 0x23, + 0x65, 0x21, 0x8e, 0x89, 0xa0, 0x1d, 0x98, 0xda, 0xe5, 0x82, 0x48, 0x58, 0xa2, 0xab, 0x23, 0x6b, + 0xca, 0xdc, 0x20, 0x12, 0x45, 0x38, 0x42, 0xaf, 0x46, 0x3e, 0x94, 0x4f, 0x08, 0x78, 0xf9, 0x75, + 0x0d, 0x2e, 0xed, 0x11, 0x3f, 0xb4, 0xcd, 0xb4, 0xaf, 0x73, 0xba, 0xb8, 0x36, 0x7f, 0x37, 0x0b, + 0x21, 0x67, 0x93, 0x4c, 0x10, 0xce, 0xee, 0x82, 0xfe, 0x53, 0x0d, 0x06, 0xbc, 0x3f, 0xe8, 0x57, + 0x34, 0x98, 0xd9, 0x26, 0x46, 0xd8, 0xf7, 0xc9, 0x75, 0x23, 0x94, 0x41, 0x98, 0x77, 0xcf, 0xc2, + 0xe9, 0xb4, 0x72, 0x4d, 0x41, 0xcc, 0xcf, 0x5b, 0xe4, 0x5d, 0x4f, 0x15, 0x84, 0x13, 0x3d, 0x58, + 0x7e, 0x1d, 0x16, 0x07, 0x1a, 0x9e, 0xca, 0xb3, 0xff, 0xef, 0x84, 0xf3, 0x30, 0x9d, 0xe0, 0xeb, + 0x1d, 0x98, 0x30, 0x2c, 0x4b, 0xa6, 0x0f, 0x79, 0xb9, 0xd8, 0xb9, 0xa2, 0xa5, 0xc6, 0xba, 0xb2, + 0x9f, 0x98, 0xa3, 0x45, 0xd7, 0x00, 0x19, 0x89, 0x23, 0xb4, 0x75, 0xcf, 0x8a, 0xac, 0x24, 0xe6, + 0xc8, 0x5d, 0x1d, 0x80, 0xe2, 0x8c, 0x16, 0xfa, 0xab, 0x30, 0x97, 0xbc, 0x45, 0x75, 0x8a, 0x98, + 0x2b, 0xfd, 0xaf, 0x6b, 0x80, 0x06, 0xaf, 0x16, 0x23, 0x1f, 0xca, 0xa2, 0x46, 0x34, 0xc5, 0x85, + 0xfc, 0x8a, 0xe9, 0x68, 0xb0, 0x38, 0xd8, 0x58, 0x14, 0x04, 0x58, 0xd2, 0xd1, 0xff, 0x4c, 0x83, + 0x38, 0x3f, 0x01, 0xfa, 0x34, 0x54, 0x2c, 0x12, 0x98, 0xbe, 0xdd, 0x0b, 0xe3, 0xef, 0x90, 0xd7, + 0x42, 0x1b, 0x31, 0x08, 0xab, 0xf5, 0x90, 0x0e, 0x93, 0xa1, 0x11, 0xec, 0x36, 0x1b, 0xc2, 0x5c, + 0x64, 0x9b, 0xfb, 0x1d, 0x56, 0x82, 0x05, 0x24, 0xbe, 0x91, 0x34, 0x36, 0xc4, 0x8d, 0x24, 0xb4, + 0x7d, 0x06, 0xd7, 0xaf, 0xd0, 0xc9, 0x57, 0xaf, 0xf4, 0xff, 0x54, 0x82, 0x64, 0x4a, 0x88, 0xa2, + 0x43, 0x30, 0x78, 0x5f, 0xac, 0x74, 0x6e, 0xf7, 0xc5, 0x3e, 0xce, 0xf2, 0x29, 0xf1, 0x04, 0x78, + 0xfc, 0xd8, 0x43, 0xcd, 0x82, 0xc4, 0xd3, 0xd7, 0xc9, 0x1a, 0xe8, 0x65, 0x16, 0x21, 0x15, 0x46, + 0x46, 0xf4, 0x87, 0xa3, 0x65, 0xd1, 0xa6, 0x85, 0x0f, 0xc4, 0x8d, 0x38, 0xf9, 0xfd, 0xac, 0x14, + 0xf3, 0x16, 0xe8, 0xd3, 0x22, 0xa0, 0x6e, 0x22, 0x71, 0x6b, 0x2f, 0xba, 0xea, 0xb7, 0x98, 0x68, + 0x18, 0x47, 0xd9, 0xe9, 0x7f, 0x57, 0x83, 0x29, 0x71, 0x4f, 0x7c, 0x88, 0x98, 0xbc, 0x6d, 0x98, + 0x60, 0x4a, 0xfa, 0x28, 0xfa, 0x4b, 0x7b, 0xc7, 0xf3, 0xc2, 0xc4, 0x6d, 0x79, 0x16, 0x08, 0xc6, + 0xfe, 0xc5, 0x1c, 0xbd, 0xfe, 0xad, 0x71, 0x78, 0x46, 0x54, 0x19, 0xd8, 0x9e, 0xe5, 0x12, 0x3c, + 0x80, 0x0b, 0x62, 0x96, 0x1a, 0xbe, 0x61, 0xcb, 0x83, 0xa1, 0x62, 0x66, 0x97, 0x38, 0x32, 0x1c, + 0x40, 0x87, 0xb3, 0x68, 0xa0, 0x9f, 0x87, 0x8b, 0xa2, 0xf8, 0x06, 0x31, 0x9c, 0x70, 0x27, 0xa2, + 0x5d, 0xcc, 0x04, 0x63, 0xa7, 
0xd4, 0xeb, 0x19, 0xf8, 0x70, 0x26, 0x15, 0x16, 0xcb, 0x25, 0x00, + 0x75, 0x9f, 0x18, 0xea, 0xa9, 0xd8, 0x08, 0xb1, 0x5c, 0xeb, 0x99, 0x18, 0x71, 0x0e, 0x25, 0xe6, + 0xbf, 0x32, 0xf6, 0x99, 0x39, 0x8c, 0x49, 0xe8, 0xdb, 0x2c, 0x7f, 0x01, 0xe5, 0x6f, 0x6e, 0xc0, + 0x26, 0x41, 0x38, 0x5d, 0x17, 0xbd, 0x02, 0x73, 0xec, 0xa0, 0x2f, 0xbe, 0x86, 0x33, 0x11, 0xe7, + 0x43, 0xdc, 0x48, 0x40, 0x70, 0xaa, 0xa6, 0xfe, 0x5b, 0x1a, 0xcc, 0xa8, 0x0c, 0x34, 0x44, 0x0c, + 0x6f, 0x5f, 0x11, 0xd7, 0x23, 0x84, 0x42, 0xaa, 0x54, 0x87, 0x91, 0xd8, 0x0f, 0x34, 0xb8, 0x90, + 0xd1, 0x86, 0x1d, 0x4f, 0x91, 0x94, 0xe8, 0x1f, 0xe5, 0x78, 0x6a, 0x60, 0x1b, 0x91, 0xc7, 0x53, + 0x69, 0x08, 0x1e, 0xa0, 0x8b, 0xee, 0xc2, 0x98, 0xe9, 0xdb, 0x62, 0x58, 0x5e, 0x2a, 0x64, 0x92, + 0xe0, 0x66, 0x1c, 0x18, 0x5d, 0xc7, 0x4d, 0x4c, 0x11, 0xea, 0xff, 0x6a, 0x0c, 0x2a, 0x4a, 0x0a, + 0x0a, 0xb4, 0x3e, 0x8a, 0x45, 0x1b, 0xa3, 0x8f, 0xac, 0xda, 0x75, 0x18, 0xeb, 0xf4, 0xfa, 0x05, + 0x4d, 0x5a, 0x89, 0xee, 0x3a, 0x45, 0xd7, 0xe9, 0xf5, 0xd1, 0x5d, 0x69, 0x24, 0x17, 0x33, 0x63, + 0x65, 0x2c, 0x5d, 0xca, 0x50, 0x8e, 0x78, 0x73, 0x3c, 0x97, 0x37, 0xbb, 0x30, 0x15, 0x08, 0x0b, + 0x7a, 0xa2, 0x78, 0xfa, 0x13, 0x65, 0xa4, 0x85, 0xc5, 0xcc, 0xd5, 0xef, 0xc8, 0xa0, 0x8e, 0x68, + 0x50, 0x05, 0xa0, 0xcf, 0xa2, 0x99, 0x99, 0x5d, 0x51, 0xe6, 0x0a, 0xc0, 0x26, 0x2b, 0xc1, 0x02, + 0xa2, 0xff, 0x4b, 0x0d, 0xd0, 0x20, 0x42, 0xf4, 0x61, 0x98, 0x60, 0x41, 0xdd, 0x62, 0xa1, 0x29, + 0x37, 0xdf, 0x8d, 0x20, 0xc0, 0x1c, 0x86, 0xde, 0x12, 0x11, 0xfb, 0xc5, 0x26, 0x46, 0xee, 0xde, + 0x82, 0xa6, 0x12, 0xe2, 0x1f, 0x6d, 0x4f, 0x63, 0x79, 0xdb, 0x93, 0xfe, 0x27, 0x25, 0xca, 0x71, + 0xb6, 0x1b, 0x12, 0x97, 0x45, 0x58, 0x1e, 0x00, 0x18, 0xfd, 0xd0, 0xe3, 0xdb, 0xb1, 0x60, 0xbc, + 0x66, 0xb1, 0xc1, 0x95, 0x48, 0x57, 0x25, 0x42, 0x7e, 0x02, 0x12, 0xff, 0xc6, 0x0a, 0x31, 0x4a, + 0x3a, 0xb4, 0xbb, 0xe4, 0x2d, 0xdb, 0xb5, 0xbc, 0x7b, 0x62, 0x2c, 0x46, 0x25, 0x7d, 0x47, 0x22, + 0xe4, 0xa4, 0xe3, 0xdf, 0x58, 0x21, 0x86, 0xbe, 0x00, 0x4b, 0x2c, 0xa1, 0xaa, 0xcb, 0x52, 0xf1, + 0x88, 0xbe, 0x79, 0x8e, 0x13, 0xed, 0x0f, 0xe5, 0xda, 0x53, 0x47, 0x87, 0xd5, 0xa5, 0x7a, 0x4e, + 0x1d, 0x9c, 0xdb, 0x5a, 0xff, 0x1d, 0x0d, 0x2e, 0x65, 0x0e, 0x05, 0xba, 0x0e, 0x8b, 0xf1, 0xb1, + 0xb7, 0x2a, 0xd0, 0xca, 0x71, 0x66, 0xa9, 0x9b, 0xe9, 0x0a, 0x78, 0xb0, 0x0d, 0x5a, 0x97, 0x9b, + 0xba, 0x2a, 0x30, 0xc5, 0x99, 0xf9, 0x93, 0x02, 0x55, 0x96, 0x4c, 0xc5, 0x59, 0xed, 0xf4, 0x9f, + 0x4b, 0x74, 0x38, 0x1e, 0x30, 0xca, 0xca, 0x5b, 0xa4, 0x23, 0x23, 0x61, 0x25, 0x2b, 0xd7, 0x68, + 0x21, 0xe6, 0x30, 0xf4, 0xb4, 0x1a, 0x8f, 0x2e, 0x45, 0x46, 0x14, 0x93, 0xae, 0x87, 0x00, 0xeb, + 0x9e, 0x6b, 0x87, 0x9e, 0x6f, 0xbb, 0x1d, 0xb4, 0x0d, 0x65, 0x43, 0x64, 0xfb, 0x15, 0xac, 0xf6, + 0xd9, 0x42, 0x06, 0x91, 0xc0, 0xc1, 0x23, 0xb5, 0xa2, 0x5f, 0x58, 0xe2, 0xd6, 0xff, 0xa9, 0x06, + 0x97, 0xa9, 0xf4, 0xb0, 0xa2, 0xab, 0x6e, 0xf2, 0x4a, 0xec, 0x10, 0xfb, 0x60, 0x17, 0x2a, 0x7e, + 0xdc, 0x4c, 0xf0, 0xe5, 0xcf, 0xaa, 0xd7, 0x6d, 0x95, 0xd4, 0xdf, 0x54, 0x47, 0xa8, 0xfb, 0x5e, + 0x10, 0x4d, 0x4e, 0xfa, 0x06, 0xae, 0x5c, 0xab, 0x4a, 0x4f, 0xb0, 0x8a, 0x5f, 0xff, 0x5a, 0x09, + 0x60, 0x83, 0x84, 0xf7, 0x3c, 0x7f, 0x97, 0x0e, 0xd1, 0x07, 0xea, 0xd2, 0xc7, 0x53, 0x30, 0xde, + 0xf3, 0xac, 0x40, 0x88, 0x13, 0x76, 0xef, 0x88, 0x1d, 0xdc, 0xb2, 0x52, 0x54, 0x85, 0x09, 0xe6, + 0xa5, 0x15, 0x72, 0x9b, 0xa9, 0xa8, 0x54, 0x2d, 0x09, 0x30, 0x2f, 0xe7, 0xb9, 0xf3, 0x58, 0x08, + 0x61, 0x20, 0x74, 0x6e, 0x91, 0x3b, 0x8f, 0x97, 0x61, 0x09, 0xd5, 0xbf, 0x36, 0x0e, 0x89, 0x74, + 0xd5, 0xb1, 0xdd, 0xac, 0x9d, 0x8f, 0xdd, 0xfc, 0x05, 
0x58, 0x72, 0x3c, 0xc3, 0xaa, 0x19, 0x0e, + 0x65, 0x7a, 0xbf, 0xcd, 0xa7, 0xc3, 0x70, 0x3b, 0x32, 0x17, 0x32, 0x13, 0x00, 0xb7, 0x72, 0xea, + 0xe0, 0xdc, 0xd6, 0x28, 0x94, 0x49, 0xb2, 0x79, 0xa6, 0xac, 0x5b, 0xa3, 0x26, 0xf3, 0x5e, 0x51, + 0x83, 0x47, 0xe5, 0x16, 0x9a, 0xcc, 0xa3, 0x8d, 0x7e, 0x41, 0x83, 0x4b, 0x64, 0x3f, 0x24, 0xbe, + 0x6b, 0x38, 0x77, 0x7c, 0x63, 0x7b, 0xdb, 0x36, 0x45, 0x48, 0x0c, 0x9f, 0x9c, 0xd6, 0xd1, 0x61, + 0xf5, 0xd2, 0x5a, 0x56, 0x85, 0x07, 0x87, 0xd5, 0x4f, 0x0e, 0xe6, 0xaa, 0x8f, 0x62, 0x45, 0x33, + 0x9b, 0x30, 0x66, 0xcc, 0x26, 0xb7, 0xfc, 0x32, 0x54, 0x4e, 0x11, 0x1d, 0x39, 0xad, 0xfa, 0x50, + 0xfe, 0xc1, 0x24, 0x28, 0x71, 0xba, 0xa7, 0xc8, 0x8f, 0xf6, 0x8f, 0x34, 0xb8, 0x68, 0x3a, 0x36, + 0x71, 0xc3, 0x54, 0x38, 0x32, 0x5f, 0x18, 0x9b, 0x85, 0x02, 0x88, 0x7b, 0xc4, 0x6d, 0x36, 0xea, + 0x9e, 0xeb, 0x12, 0x33, 0xac, 0x67, 0x20, 0xe7, 0x26, 0x49, 0x16, 0x04, 0x67, 0x76, 0x86, 0x7d, + 0x0f, 0x2b, 0x6f, 0x36, 0xd4, 0xeb, 0x39, 0x75, 0x51, 0x86, 0x25, 0x14, 0xbd, 0x00, 0x95, 0x8e, + 0xef, 0xf5, 0x7b, 0x41, 0x9d, 0x45, 0xeb, 0xf0, 0x19, 0x64, 0xc1, 0x50, 0xd7, 0xe3, 0x62, 0xac, + 0xd6, 0x41, 0x9f, 0x82, 0x19, 0xfe, 0xb3, 0xe5, 0x93, 0x6d, 0x7b, 0x5f, 0x2c, 0x37, 0x16, 0x02, + 0x70, 0x5d, 0x29, 0xc7, 0x89, 0x5a, 0xe8, 0x63, 0x30, 0x6d, 0x07, 0x41, 0x9f, 0xf8, 0x9b, 0xf8, + 0x96, 0xc8, 0xc1, 0xc2, 0xbc, 0x9f, 0xcd, 0xa8, 0x10, 0xc7, 0x70, 0xf4, 0xab, 0x1a, 0xcc, 0xf9, + 0xe4, 0xbd, 0xbe, 0xed, 0x13, 0x8b, 0x11, 0x0d, 0x44, 0xb0, 0x34, 0x1e, 0x2d, 0x40, 0x7b, 0x05, + 0x27, 0x90, 0x72, 0x3e, 0x97, 0xbe, 0x83, 0x24, 0x10, 0xa7, 0x7a, 0x40, 0x87, 0x2a, 0xb0, 0x3b, + 0xae, 0xed, 0x76, 0x56, 0x9d, 0x4e, 0xb0, 0x54, 0x66, 0x4b, 0x97, 0x0d, 0x55, 0x3b, 0x2e, 0xc6, + 0x6a, 0x1d, 0xf4, 0x12, 0xcc, 0xf6, 0x03, 0xca, 0xb9, 0x5d, 0xc2, 0xc7, 0x77, 0x3a, 0x0e, 0xa1, + 0xdb, 0x54, 0x01, 0x38, 0x59, 0x8f, 0xda, 0x63, 0x51, 0x81, 0x18, 0x65, 0xe0, 0x37, 0x2e, 0x69, + 0x3f, 0x37, 0x13, 0x10, 0x9c, 0xaa, 0xb9, 0xbc, 0x0a, 0x17, 0x32, 0x3e, 0xf3, 0x54, 0xcb, 0xe3, + 0xb7, 0x4a, 0xf0, 0xa1, 0x13, 0xb9, 0x12, 0xfd, 0x43, 0x0d, 0x2a, 0x64, 0x3f, 0xf4, 0x0d, 0x19, + 0xd0, 0x47, 0xa7, 0x68, 0xfb, 0x5c, 0x96, 0xc0, 0xca, 0x5a, 0x4c, 0x88, 0x4f, 0x9b, 0xdc, 0xee, + 0x14, 0x08, 0x56, 0xfb, 0x43, 0x55, 0x6b, 0x7e, 0x1f, 0x57, 0xf5, 0xad, 0x89, 0xac, 0xbe, 0x02, + 0xb2, 0xfc, 0x39, 0x58, 0x48, 0x63, 0x3e, 0xd5, 0x48, 0xfd, 0x8b, 0x12, 0x4c, 0xb4, 0x1c, 0xc3, + 0x7d, 0x18, 0x59, 0xe3, 0xff, 0x72, 0x22, 0x99, 0x43, 0xa1, 0x0c, 0x19, 0xac, 0xab, 0xb9, 0x89, + 0x5e, 0x3a, 0xa9, 0x44, 0x2f, 0xaf, 0x17, 0x27, 0x71, 0x7c, 0x5e, 0x97, 0x3f, 0xd2, 0x60, 0x9a, + 0xd5, 0x7b, 0x08, 0x39, 0x21, 0xde, 0x49, 0xe6, 0x84, 0x78, 0xb9, 0xf0, 0x37, 0xe5, 0xa4, 0x80, + 0xf8, 0x61, 0xf4, 0x2d, 0x2c, 0xe3, 0xc3, 0x17, 0xd5, 0x6c, 0xe6, 0xfc, 0x63, 0x9e, 0xcb, 0x4a, + 0x9f, 0x72, 0xcb, 0x33, 0x0d, 0x27, 0xad, 0xc1, 0x1d, 0x9f, 0xd2, 0xbc, 0x0b, 0xd3, 0x44, 0x5c, + 0x93, 0x8e, 0x3e, 0xa6, 0x90, 0x4a, 0x1b, 0xdd, 0xb5, 0x8e, 0xc9, 0x45, 0x25, 0x01, 0x8e, 0x29, + 0xe8, 0xff, 0xb6, 0x04, 0x15, 0x65, 0x2e, 0xdf, 0x8f, 0x2c, 0x2d, 0xd7, 0x32, 0x13, 0xfd, 0x96, + 0x58, 0xc4, 0xde, 0xe5, 0x53, 0x24, 0x0b, 0xf7, 0xa1, 0x62, 0xc6, 0x09, 0xe7, 0x46, 0x61, 0x6e, + 0x25, 0x6f, 0x9d, 0x08, 0x19, 0x8e, 0x0b, 0xb0, 0x4a, 0x44, 0xff, 0xd7, 0x25, 0x98, 0x6a, 0xf9, + 0x1e, 0x9d, 0xe0, 0x87, 0x20, 0x1a, 0x8c, 0x84, 0x68, 0x28, 0xb6, 0x6e, 0x79, 0x67, 0x73, 0x85, + 0x83, 0x9d, 0x12, 0x0e, 0xab, 0xa3, 0x10, 0x39, 0x5e, 0x3c, 0xfc, 0x47, 0x0d, 0x2a, 0xa2, 0xe6, + 0x43, 0x10, 0x10, 0x5f, 0x4a, 0x0a, 0x88, 0x57, 0x47, 0xf8, 0xae, 0x1c, 0x11, 
0xf1, 0xeb, 0x1a, + 0xcc, 0x8a, 0x1a, 0xeb, 0xa4, 0xbb, 0x45, 0x7c, 0x74, 0x0d, 0xa6, 0x82, 0x3e, 0x9b, 0x48, 0xf1, + 0x41, 0x4f, 0xaa, 0x42, 0xc2, 0xdf, 0x32, 0x4c, 0x96, 0xb3, 0x9e, 0x57, 0x51, 0x72, 0x2b, 0xf1, + 0x02, 0x1c, 0x35, 0xa6, 0x26, 0x9c, 0xef, 0x39, 0x03, 0x17, 0xdb, 0xb1, 0xe7, 0x10, 0xcc, 0x20, + 0xd4, 0x72, 0xa2, 0x7f, 0xa3, 0xc3, 0x23, 0x66, 0x39, 0x51, 0x70, 0x80, 0x79, 0xb9, 0xfe, 0xf5, + 0x71, 0x39, 0xd8, 0x4c, 0x82, 0xdd, 0x80, 0x69, 0xd3, 0x27, 0x46, 0x48, 0xac, 0xda, 0xc1, 0x30, + 0x9d, 0x63, 0x5a, 0x5c, 0x3d, 0x6a, 0x81, 0xe3, 0xc6, 0x54, 0x61, 0x52, 0xcf, 0x83, 0x4a, 0xb1, + 0x6e, 0x99, 0x7b, 0x16, 0xf4, 0x59, 0x98, 0xf0, 0xee, 0xb9, 0x32, 0x10, 0xe2, 0x58, 0xc2, 0xec, + 0x53, 0x6e, 0xd3, 0xda, 0x98, 0x37, 0x62, 0x99, 0x3c, 0x44, 0xee, 0x07, 0xae, 0xc8, 0x56, 0xb2, + 0xf2, 0x3e, 0x20, 0x07, 0xa6, 0xba, 0x6c, 0x1a, 0xb8, 0x97, 0x7b, 0x34, 0x56, 0xe6, 0x13, 0xaa, + 0xe6, 0x13, 0x64, 0x98, 0x71, 0x44, 0x82, 0x2a, 0xbe, 0x54, 0x39, 0x0b, 0x7a, 0x86, 0x49, 0x54, + 0xc5, 0x77, 0x23, 0x2a, 0xc4, 0x31, 0x1c, 0x1d, 0x40, 0x85, 0xdf, 0x48, 0xe6, 0x52, 0x76, 0xaa, + 0xb8, 0x03, 0x52, 0x74, 0xef, 0x4e, 0x8c, 0x8d, 0x0f, 0xbd, 0x52, 0x80, 0x55, 0x5a, 0xfa, 0x2f, + 0x8f, 0x49, 0x26, 0x15, 0x12, 0x3f, 0x3b, 0xcf, 0xba, 0x56, 0xe8, 0xbd, 0x86, 0x4f, 0xc2, 0x44, + 0x6f, 0xc7, 0x08, 0x22, 0x4e, 0x8d, 0x72, 0x5f, 0x4e, 0xb4, 0x68, 0xe1, 0x83, 0xc3, 0xea, 0x8c, + 0x20, 0xcd, 0x7e, 0x63, 0x5e, 0x17, 0xf5, 0xe1, 0x42, 0x10, 0x1a, 0x0e, 0x69, 0xdb, 0xc2, 0x5b, + 0x14, 0x84, 0x46, 0xb7, 0x57, 0x20, 0x85, 0x25, 0x3b, 0x4e, 0x6a, 0x0f, 0xa2, 0xc2, 0x59, 0xf8, + 0xd1, 0x5f, 0xd3, 0x60, 0x89, 0x95, 0xaf, 0xf6, 0x43, 0x8f, 0x67, 0xa5, 0x8d, 0x89, 0x9f, 0xfe, + 0x30, 0x95, 0x59, 0xf7, 0xed, 0x1c, 0x7c, 0x38, 0x97, 0x92, 0xfe, 0xa7, 0x1a, 0xa0, 0xc1, 0x59, + 0x44, 0x0e, 0x94, 0x2d, 0xb2, 0x6d, 0xf4, 0x9d, 0x30, 0xda, 0x85, 0x0b, 0xdd, 0x5a, 0x8d, 0x51, + 0xc6, 0xc2, 0xb1, 0x21, 0xf0, 0x62, 0x49, 0x01, 0x79, 0x30, 0x7d, 0x6f, 0xc7, 0x0e, 0x89, 0x63, + 0x07, 0xa1, 0x10, 0x90, 0xa3, 0x92, 0x93, 0x6a, 0xc7, 0x5b, 0x11, 0x62, 0x1c, 0xd3, 0xd0, 0x7f, + 0x71, 0x0c, 0xca, 0xa7, 0x78, 0x5d, 0xa7, 0x0f, 0x48, 0x5c, 0x57, 0xa7, 0xba, 0x0a, 0x19, 0xc5, + 0x4b, 0xc5, 0x34, 0x8a, 0xfa, 0x00, 0x32, 0x9c, 0x41, 0x00, 0x7d, 0x19, 0x2e, 0xda, 0xee, 0xb6, + 0x6f, 0x04, 0xa1, 0xdf, 0x37, 0xc3, 0xbe, 0x1f, 0x11, 0x1e, 0x2b, 0x42, 0x98, 0x59, 0xf7, 0xcd, + 0x0c, 0x74, 0x38, 0x93, 0x08, 0x22, 0x30, 0x75, 0xcf, 0xf3, 0x77, 0xa9, 0xfc, 0x1a, 0x2f, 0x9e, + 0xc2, 0xfb, 0x2d, 0x86, 0x22, 0x16, 0x5c, 0xfc, 0x77, 0x80, 0x23, 0xdc, 0xfa, 0x1f, 0x6b, 0x30, + 0xc1, 0xaf, 0x3f, 0x3d, 0x12, 0xa6, 0x0d, 0xeb, 0x6a, 0x6e, 0x5a, 0x3a, 0x6a, 0x71, 0xb0, 0x1a, + 0x8f, 0x88, 0xc5, 0xc1, 0xfa, 0x9a, 0xa3, 0x4e, 0xfc, 0xf1, 0x98, 0xf8, 0x16, 0xb6, 0x5f, 0x37, + 0xe1, 0x82, 0xd0, 0x3b, 0x6f, 0xd9, 0xdb, 0x84, 0x72, 0x57, 0xc3, 0x38, 0x08, 0xc4, 0x5d, 0x5e, + 0x26, 0xf8, 0xea, 0x83, 0x60, 0x9c, 0xd5, 0x06, 0xfd, 0x1b, 0x8d, 0xee, 0x8c, 0xa1, 0x6f, 0x9b, + 0x23, 0x65, 0xd0, 0x93, 0x7d, 0x5b, 0x59, 0xe7, 0xc8, 0xb8, 0xc1, 0xbe, 0x19, 0x6f, 0x91, 0xac, + 0xf4, 0xc1, 0x61, 0xb5, 0x9a, 0xe1, 0x0d, 0x8c, 0xbc, 0xd7, 0x74, 0x60, 0xbf, 0xf6, 0xe3, 0x63, + 0xab, 0x30, 0x4f, 0x7a, 0xd4, 0x63, 0x74, 0x03, 0x26, 0x02, 0xd3, 0xeb, 0x91, 0xe3, 0x1e, 0x91, + 0x4a, 0x5b, 0x5c, 0x72, 0x80, 0xdb, 0xb4, 0x25, 0xe6, 0x08, 0x96, 0xdf, 0x85, 0x19, 0xb5, 0xe7, + 0x19, 0x0e, 0x81, 0x86, 0xea, 0x10, 0x38, 0xf5, 0xb1, 0x9a, 0xea, 0x40, 0xf8, 0xfd, 0x12, 0x88, + 0x77, 0x37, 0x86, 0x38, 0x2f, 0xb0, 0xa3, 0xac, 0x61, 0x23, 0xbc, 0x35, 0x92, 0x7e, 0xcc, 0x2f, + 0x1e, 
0x03, 0x35, 0x71, 0x18, 0x72, 0x61, 0xd2, 0x31, 0xb6, 0x88, 0x13, 0xbd, 0xc2, 0x70, 0xad, + 0xf8, 0x33, 0x01, 0x3c, 0xbb, 0x6e, 0x90, 0xf2, 0x2a, 0xf3, 0x42, 0x2c, 0xa8, 0x2c, 0xbf, 0x0c, + 0x15, 0xa5, 0xda, 0xa9, 0x7c, 0x30, 0xdf, 0xd4, 0xe0, 0x72, 0xc4, 0x12, 0xc9, 0xeb, 0xfc, 0xe8, + 0x39, 0x28, 0x1b, 0x3d, 0x9b, 0xb9, 0x25, 0x55, 0xc7, 0xee, 0x6a, 0xab, 0xc9, 0xca, 0xb0, 0x84, + 0xa2, 0x8f, 0x43, 0x39, 0x9a, 0x27, 0xa1, 0xa3, 0xc8, 0x25, 0x2e, 0xcf, 0x53, 0x64, 0x0d, 0xf4, + 0xac, 0x92, 0x07, 0x6d, 0x22, 0xde, 0xd1, 0x24, 0x61, 0x7e, 0x46, 0xaa, 0xff, 0x5a, 0x09, 0x66, + 0xb9, 0x2d, 0x5f, 0xb3, 0x5d, 0xcb, 0x76, 0x3b, 0x0f, 0x41, 0x94, 0x26, 0xde, 0x53, 0x2b, 0x9d, + 0xd5, 0x7b, 0x6a, 0x37, 0x61, 0xf2, 0x3d, 0xba, 0xac, 0x23, 0x76, 0x18, 0x6a, 0x75, 0xc9, 0xb9, + 0x66, 0x12, 0x21, 0xc0, 0x02, 0x85, 0xfe, 0x5f, 0x35, 0x58, 0x4c, 0x0c, 0xcb, 0x43, 0x10, 0xca, + 0xdb, 0x49, 0xa1, 0xbc, 0x5a, 0x2c, 0xa9, 0x85, 0xd2, 0xe7, 0x1c, 0xe1, 0xfc, 0xbb, 0x25, 0x18, + 0x6f, 0x13, 0x62, 0x3d, 0x84, 0x99, 0x7e, 0x27, 0xb1, 0x69, 0x7e, 0xb6, 0xf0, 0xdb, 0x1a, 0x79, + 0x16, 0xff, 0x76, 0xca, 0xe2, 0xff, 0x5c, 0x61, 0x0a, 0xc7, 0x9b, 0xfb, 0xbf, 0x51, 0x02, 0xa0, + 0xd5, 0xf8, 0x8b, 0x56, 0x22, 0x46, 0x2f, 0x7e, 0xf3, 0x70, 0xfa, 0x83, 0xf2, 0x52, 0xa1, 0x2e, + 0xdf, 0x4d, 0x1a, 0x8b, 0xfd, 0xc9, 0xc9, 0x37, 0x93, 0x92, 0xab, 0x6f, 0xfc, 0x8c, 0x56, 0x9f, + 0xfe, 0xcf, 0x35, 0x60, 0x79, 0x95, 0x1b, 0x1b, 0x6d, 0xf4, 0x12, 0xcc, 0xda, 0xfc, 0xb4, 0xae, + 0xa1, 0x26, 0x8f, 0x62, 0x67, 0x0a, 0x4d, 0x15, 0x80, 0x93, 0xf5, 0x50, 0x57, 0x19, 0xd7, 0x11, + 0xde, 0x95, 0x13, 0xfd, 0x90, 0xd9, 0x43, 0x67, 0xb2, 0x27, 0x46, 0xff, 0x71, 0x09, 0xe6, 0x53, + 0x75, 0x87, 0xd0, 0xe7, 0xcf, 0x47, 0x7a, 0x29, 0x09, 0x45, 0xc7, 0xce, 0x3f, 0xa1, 0xa8, 0xcc, + 0xed, 0x39, 0x7e, 0xbe, 0xb9, 0x3d, 0xbf, 0xa7, 0x01, 0x7b, 0x1c, 0xee, 0x21, 0x48, 0xcf, 0xbf, + 0x94, 0x94, 0x9e, 0x9f, 0x29, 0xca, 0x38, 0x39, 0x42, 0xf3, 0xb7, 0x4b, 0xc0, 0xf2, 0xed, 0x8b, + 0xe0, 0x04, 0xe5, 0xbc, 0x5f, 0xcb, 0x39, 0xef, 0x7f, 0x46, 0x84, 0x0b, 0xa4, 0x1c, 0x5f, 0x4a, + 0xc8, 0xc0, 0xc7, 0x95, 0x88, 0x80, 0xb1, 0xa4, 0x18, 0x19, 0x8c, 0x0a, 0x40, 0xf7, 0x61, 0x36, + 0xd8, 0xf1, 0xbc, 0x30, 0x32, 0x81, 0xc5, 0xdc, 0xad, 0x16, 0x0e, 0xa9, 0x8d, 0x3e, 0x85, 0x2f, + 0xcc, 0xb6, 0x8a, 0x1b, 0x27, 0x49, 0xa1, 0x15, 0x80, 0x2d, 0xc7, 0x33, 0x77, 0xeb, 0xcd, 0x06, + 0x8e, 0x02, 0x2f, 0x59, 0x44, 0x51, 0x4d, 0x96, 0x62, 0xa5, 0x86, 0xfe, 0x87, 0x1a, 0x1f, 0xad, + 0x53, 0x2c, 0xab, 0x87, 0x28, 0x25, 0x3f, 0x9a, 0x92, 0x92, 0x79, 0xaf, 0xcb, 0x7d, 0x57, 0x7c, + 0x85, 0x7c, 0xba, 0xc9, 0x81, 0x59, 0x47, 0x7d, 0x86, 0x41, 0xb0, 0x71, 0xa1, 0x17, 0x1c, 0xc4, + 0x83, 0x85, 0x4a, 0x11, 0x4e, 0x22, 0xa7, 0x62, 0x34, 0xea, 0xb8, 0xfa, 0x2c, 0x35, 0x6b, 0xd8, + 0x52, 0x01, 0x38, 0x59, 0x4f, 0x7f, 0x13, 0x3e, 0xc2, 0xbb, 0xcd, 0xe2, 0x9e, 0xd7, 0xf6, 0x4d, + 0x12, 0x04, 0x75, 0xa3, 0x67, 0x98, 0x54, 0xb1, 0x67, 0xd7, 0x05, 0xb9, 0xc7, 0xeb, 0x14, 0x4f, + 0xc7, 0xff, 0x7f, 0x0d, 0xaa, 0x0a, 0xce, 0x44, 0x24, 0x48, 0xc4, 0xa0, 0xdf, 0xd2, 0xa0, 0x62, + 0xb8, 0xae, 0x17, 0x1a, 0xea, 0x01, 0x8c, 0x55, 0xfc, 0x41, 0xad, 0x5c, 0x52, 0x2b, 0xab, 0x31, + 0x99, 0xd4, 0x51, 0xab, 0x02, 0xc1, 0x6a, 0x6f, 0x96, 0x3f, 0x07, 0x0b, 0xe9, 0x56, 0xa7, 0x52, + 0xe1, 0x6b, 0x70, 0x49, 0xe9, 0x95, 0xb8, 0x78, 0x45, 0xf5, 0xe5, 0xe7, 0x61, 0x6a, 0xcf, 0x0e, + 0xec, 0xe8, 0x02, 0xaf, 0x32, 0x8a, 0x77, 0x79, 0x31, 0x8e, 0xe0, 0xfa, 0x1b, 0x70, 0x41, 0xc5, + 0xc1, 0x96, 0xd8, 0x46, 0xfb, 0x34, 0xf3, 0xb0, 0x0e, 0xcf, 0x28, 0x18, 0x32, 0x6f, 0x1f, 0x9d, + 0x06, 0xdd, 0x57, 0x27, 0x23, 
0x0e, 0x17, 0x21, 0xf2, 0xdf, 0xd5, 0xe0, 0x09, 0x92, 0xc7, 0x30, + 0x82, 0xdd, 0xbf, 0x30, 0xe2, 0x8c, 0xe6, 0x32, 0xa4, 0x48, 0xef, 0x92, 0x07, 0xc6, 0xf9, 0x3d, + 0x43, 0x07, 0x00, 0x81, 0x9c, 0x92, 0x51, 0xa2, 0x27, 0x33, 0xe7, 0x58, 0xa4, 0xf0, 0x94, 0xbf, + 0xb1, 0x42, 0x0c, 0xbd, 0x07, 0xe5, 0x40, 0xcc, 0xe4, 0x28, 0x97, 0x23, 0x33, 0x18, 0x43, 0x04, + 0x88, 0x89, 0x5f, 0x58, 0x92, 0x41, 0xbf, 0xa9, 0xc1, 0x45, 0x27, 0x63, 0x5d, 0x88, 0x2d, 0xa1, + 0x7d, 0x0e, 0x4b, 0x8e, 0xbb, 0x00, 0xb3, 0x20, 0x38, 0xb3, 0x2b, 0xe8, 0x1f, 0xe7, 0xde, 0xc4, + 0xe3, 0x31, 0xcb, 0x77, 0x46, 0xec, 0xe4, 0x59, 0x5d, 0xca, 0xfb, 0xe6, 0x14, 0x57, 0x51, 0x98, + 0xa7, 0x6a, 0x0b, 0x26, 0xb7, 0x98, 0x8a, 0x2f, 0x58, 0xbd, 0xb0, 0x3d, 0x21, 0x9e, 0xeb, 0x66, + 0xfa, 0x37, 0xff, 0x1f, 0x0b, 0xcc, 0xe8, 0x6d, 0x18, 0xb3, 0xdc, 0xe8, 0x35, 0x8f, 0x57, 0x47, + 0xd0, 0x6f, 0xe3, 0x00, 0x53, 0xca, 0x1d, 0x14, 0x29, 0x72, 0xa1, 0xec, 0x8a, 0x5d, 0x5d, 0xf0, + 0x62, 0xe1, 0xf7, 0x0c, 0xa5, 0x76, 0x20, 0x75, 0x92, 0xa8, 0x04, 0x4b, 0x1a, 0x94, 0x9e, 0x54, + 0xd8, 0xc7, 0x47, 0xa3, 0x37, 0xd4, 0xa3, 0xef, 0x2d, 0x55, 0xf7, 0x3e, 0xc5, 0x53, 0xda, 0xb3, + 0xb9, 0x7a, 0x37, 0x81, 0xc9, 0xd0, 0x60, 0xf1, 0x0a, 0x93, 0xc5, 0xe3, 0x05, 0x68, 0xff, 0xef, + 0x50, 0x2c, 0xb1, 0x2a, 0xc1, 0x7e, 0x06, 0x58, 0x20, 0xa7, 0x8c, 0xc5, 0x9f, 0x5a, 0x14, 0x07, + 0x66, 0x85, 0x19, 0x8b, 0xe7, 0xb1, 0xe7, 0x8c, 0xc5, 0xff, 0xc7, 0x02, 0x33, 0x7a, 0x97, 0xaa, + 0x93, 0x5c, 0x8e, 0x8b, 0x6c, 0x4c, 0x6f, 0x8c, 0xb8, 0xc6, 0x82, 0x28, 0x44, 0x95, 0xff, 0xc2, + 0x12, 0x3f, 0xda, 0x82, 0x29, 0x61, 0xba, 0x89, 0x8b, 0xb5, 0xaf, 0x8e, 0x90, 0xed, 0x37, 0x7a, + 0x9a, 0x81, 0x5f, 0x6e, 0x8b, 0x10, 0xeb, 0xff, 0xa4, 0xcc, 0x8d, 0x6e, 0x71, 0xd6, 0xb7, 0x0d, + 0xe5, 0x08, 0xdd, 0x28, 0xe1, 0xd2, 0x51, 0x36, 0x76, 0xfe, 0x69, 0xd1, 0x2f, 0x2c, 0x71, 0xa3, + 0x7a, 0x56, 0x64, 0x7a, 0x9c, 0x31, 0x6f, 0xb8, 0xa8, 0xf4, 0x64, 0x28, 0xca, 0xd8, 0xc3, 0x08, + 0x45, 0x39, 0xcb, 0xb7, 0xeb, 0x5f, 0x83, 0xf9, 0x28, 0x52, 0xc4, 0x22, 0xcc, 0x21, 0x2c, 0x62, + 0x28, 0xd9, 0x5d, 0xad, 0x7a, 0x12, 0x84, 0xd3, 0x75, 0xd1, 0xef, 0x6b, 0x50, 0x36, 0xc5, 0x2e, + 0x2d, 0xd6, 0xd5, 0xad, 0xd1, 0x3c, 0x33, 0x2b, 0xd1, 0xa6, 0xcf, 0xd5, 0xbd, 0xbb, 0x91, 0x8c, + 0x88, 0x8a, 0xcf, 0xc8, 0x53, 0x2f, 0x7b, 0x8d, 0xfe, 0x03, 0x55, 0x66, 0x1d, 0x96, 0x23, 0x9e, + 0x5d, 0x7d, 0xe1, 0xc1, 0x9d, 0xb7, 0x47, 0xfc, 0x8a, 0xd5, 0x18, 0x23, 0xff, 0x90, 0x2f, 0x4a, + 0xbd, 0x35, 0x86, 0x9c, 0xd1, 0xb7, 0xa8, 0xdd, 0x5f, 0xde, 0x85, 0xd9, 0xc4, 0x08, 0x9e, 0xe7, + 0x81, 0xc1, 0xb2, 0x0b, 0x0b, 0xe9, 0x0f, 0x3d, 0xd7, 0x03, 0x8a, 0x9b, 0x30, 0x2d, 0x25, 0x30, + 0x7a, 0x5a, 0x21, 0x14, 0xef, 0x90, 0x37, 0xc9, 0x01, 0xa7, 0x5a, 0x4d, 0x28, 0xf8, 0xdc, 0x6c, + 0xbf, 0x4b, 0x0b, 0x04, 0x42, 0xfd, 0x07, 0xc2, 0xe8, 0xbb, 0x43, 0xba, 0x3d, 0xc7, 0x08, 0xc9, + 0xa3, 0xef, 0x25, 0xd5, 0xff, 0x97, 0xc6, 0x05, 0xa9, 0x78, 0x61, 0xc5, 0x80, 0x4a, 0x97, 0xe7, + 0xba, 0x62, 0xb9, 0x55, 0x8b, 0x5d, 0xaf, 0x63, 0x91, 0x1a, 0xeb, 0x31, 0x1a, 0xac, 0xe2, 0x44, + 0xf7, 0x06, 0xdf, 0x00, 0xba, 0x36, 0xda, 0x8e, 0x37, 0xf4, 0x53, 0x40, 0x68, 0xb0, 0x8d, 0xfa, + 0xe2, 0x8a, 0x76, 0xfc, 0x8b, 0x2b, 0x27, 0xbf, 0xc3, 0xa1, 0x7f, 0x47, 0x83, 0xcc, 0x0c, 0xd1, + 0x48, 0x87, 0x49, 0x1e, 0x1f, 0xae, 0x3e, 0x8e, 0xc4, 0x83, 0xc7, 0xb1, 0x80, 0x20, 0x1f, 0x2e, + 0x8a, 0xe8, 0xeb, 0x9b, 0xe4, 0x20, 0x7e, 0x96, 0x47, 0x4c, 0xfd, 0xf0, 0x71, 0x98, 0x2c, 0x07, + 0x4e, 0x3b, 0x85, 0x09, 0x67, 0xe2, 0x66, 0x01, 0xc0, 0xcc, 0x88, 0x78, 0x34, 0x4e, 0xc9, 0x59, + 0x57, 0xcf, 0x37, 0x00, 0x98, 0x93, 0x38, 0x39, 0x00, 
0x98, 0xd5, 0x7b, 0x44, 0x8e, 0xe3, 0x59, + 0x5f, 0x73, 0x9c, 0x97, 0xdf, 0xd3, 0x60, 0x71, 0xe0, 0x16, 0xfd, 0x50, 0x87, 0xb9, 0x0f, 0xcd, + 0x27, 0xf7, 0x6c, 0xfa, 0x75, 0x9b, 0x4a, 0x66, 0x72, 0x8d, 0xb7, 0x60, 0x36, 0xe1, 0xbb, 0x94, + 0x97, 0xb2, 0xb4, 0xcc, 0x4b, 0x59, 0xea, 0x9d, 0xab, 0xd2, 0xb1, 0x77, 0xae, 0x8e, 0x66, 0xc4, + 0x74, 0x33, 0x3b, 0xf0, 0x1d, 0x98, 0x64, 0x37, 0xa3, 0xa2, 0xd7, 0xbd, 0x5e, 0x29, 0x7c, 0xe3, + 0x2a, 0xe0, 0x62, 0x80, 0xff, 0x8f, 0x05, 0x56, 0xd4, 0x80, 0x05, 0xf6, 0xea, 0x73, 0xcb, 0xf7, + 0xb6, 0x6d, 0x87, 0x6c, 0xc4, 0x12, 0x47, 0xde, 0xc3, 0xae, 0xa7, 0xe0, 0x78, 0xa0, 0x05, 0xc2, + 0xdc, 0x92, 0xe4, 0x0b, 0xe1, 0xa5, 0x82, 0x3e, 0x7c, 0x9e, 0xe9, 0x4a, 0x5a, 0x90, 0xef, 0x01, + 0x90, 0x68, 0xe2, 0xa2, 0xe0, 0x9d, 0xd7, 0x8a, 0xdd, 0x30, 0x97, 0xd3, 0x1f, 0x49, 0x0c, 0x59, + 0x14, 0x60, 0x85, 0x08, 0xf2, 0xa1, 0xb2, 0x13, 0x3f, 0xfe, 0x23, 0xcc, 0xba, 0xd7, 0x47, 0x7c, + 0x78, 0x88, 0x6f, 0x50, 0x4a, 0x01, 0x56, 0x89, 0x20, 0x3f, 0xf1, 0xda, 0xf6, 0x08, 0x4f, 0x2b, + 0xc4, 0xd6, 0xc0, 0x49, 0x2f, 0x6d, 0x53, 0x9a, 0xae, 0xbc, 0xda, 0x38, 0x8a, 0x1d, 0x18, 0x5f, + 0x90, 0x8c, 0x69, 0xc6, 0x65, 0x58, 0xa1, 0x42, 0xc7, 0xb6, 0x1b, 0x5f, 0x67, 0x15, 0x66, 0xe1, + 0xeb, 0x23, 0x5e, 0x2b, 0x16, 0x9b, 0x7f, 0x5c, 0x80, 0x55, 0x22, 0xc8, 0x05, 0xe8, 0xca, 0x5b, + 0xae, 0xc2, 0x3c, 0x2c, 0xf4, 0x9d, 0xf1, 0x5d, 0x59, 0xee, 0x80, 0x8b, 0x7f, 0x63, 0x85, 0x02, + 0xb5, 0x7b, 0xa5, 0x13, 0x02, 0x8a, 0xab, 0x50, 0x43, 0x39, 0x20, 0x3e, 0x1d, 0x6b, 0x12, 0x15, + 0xb6, 0x5e, 0x9f, 0x54, 0xb4, 0x88, 0x07, 0x87, 0xd5, 0x19, 0x26, 0x43, 0x06, 0xb4, 0x8a, 0xf8, + 0xc4, 0x61, 0xe6, 0xb8, 0x13, 0x07, 0x74, 0x1d, 0x16, 0x03, 0xf5, 0x04, 0x9f, 0x09, 0x86, 0x59, + 0xd6, 0x44, 0xde, 0x8a, 0x6e, 0xa7, 0x2b, 0xe0, 0xc1, 0x36, 0x5c, 0xf0, 0x11, 0x8b, 0xb5, 0x9f, + 0x53, 0x05, 0x1f, 0x2f, 0xc3, 0x12, 0x8a, 0xf6, 0x52, 0x8f, 0x52, 0xcf, 0x8f, 0xea, 0x39, 0x18, + 0xee, 0x85, 0x6a, 0xf4, 0x65, 0xf5, 0xe1, 0xd1, 0x85, 0xe2, 0xa1, 0x5c, 0xd9, 0x37, 0x9b, 0x4f, + 0x78, 0x7c, 0xb4, 0x9f, 0x0c, 0x62, 0x5e, 0x3c, 0x93, 0xa8, 0x51, 0x79, 0x06, 0x91, 0x1b, 0xc0, + 0xfc, 0x9d, 0x32, 0x54, 0x14, 0xdd, 0xe3, 0xfd, 0xb8, 0xb0, 0x12, 0x42, 0xc5, 0xf4, 0xdc, 0x20, + 0xf4, 0x0d, 0xe5, 0x92, 0xce, 0x88, 0x34, 0xe5, 0x87, 0xd7, 0x63, 0xcc, 0x58, 0x25, 0x43, 0x97, + 0xa8, 0xf4, 0xdd, 0x8c, 0x9d, 0x81, 0xef, 0x46, 0x2e, 0xd1, 0x0c, 0xff, 0xcd, 0xa7, 0x00, 0x22, + 0x49, 0x2f, 0x5f, 0x76, 0x94, 0x59, 0xcf, 0x9a, 0xc1, 0x0d, 0x09, 0xc3, 0x4a, 0x3d, 0x74, 0x1f, + 0x66, 0x1d, 0x35, 0xe3, 0x91, 0xd8, 0x86, 0x0a, 0x9d, 0xae, 0x26, 0x52, 0x27, 0x45, 0x07, 0x7d, + 0x4a, 0x11, 0x4e, 0x92, 0xa2, 0x6c, 0xe0, 0x44, 0x59, 0xba, 0x46, 0xf2, 0x43, 0xca, 0x5c, 0x5f, + 0x31, 0x1b, 0xc8, 0xa2, 0x00, 0x2b, 0x44, 0x72, 0x9c, 0x45, 0x53, 0x85, 0x9c, 0x45, 0x7d, 0xb8, + 0xe0, 0x93, 0xd0, 0x3f, 0xa8, 0x1f, 0x98, 0xec, 0xfd, 0x36, 0x3f, 0x64, 0x29, 0xb2, 0xca, 0xc5, + 0x62, 0xe0, 0xf1, 0x20, 0x2a, 0x9c, 0x85, 0x3f, 0x21, 0xe2, 0xa6, 0x8f, 0x15, 0x71, 0x9f, 0x86, + 0x4a, 0x48, 0xcc, 0x1d, 0xd7, 0x36, 0x0d, 0xa7, 0xd9, 0x10, 0xf7, 0x54, 0xe3, 0xd5, 0x1a, 0x83, + 0xb0, 0x5a, 0x0f, 0xd5, 0x60, 0xac, 0x6f, 0x5b, 0x42, 0xce, 0xff, 0x8c, 0x7c, 0xc6, 0xb3, 0xd9, + 0x78, 0x70, 0x58, 0xfd, 0x50, 0xec, 0x7d, 0x91, 0x5f, 0x75, 0xb5, 0xb7, 0xdb, 0xb9, 0x1a, 0x1e, + 0xf4, 0x48, 0xb0, 0xb2, 0xd9, 0x6c, 0x60, 0xda, 0x38, 0xcb, 0x91, 0x36, 0x33, 0xbc, 0x23, 0x4d, + 0xbf, 0x05, 0x10, 0x0b, 0x93, 0x91, 0x5d, 0x1b, 0x3f, 0x9d, 0x80, 0x4b, 0xa3, 0x1e, 0x19, 0xb2, + 0x5c, 0x52, 0x2c, 0xc1, 0xfd, 0xea, 0x76, 0x48, 0xfc, 0xdb, 0xb7, 0xd7, 0xef, 
0xec, 0xf8, 0x24, + 0xd8, 0xf1, 0x1c, 0xab, 0x60, 0x32, 0xab, 0xf8, 0x7d, 0x84, 0x01, 0x8c, 0x38, 0x87, 0x12, 0xaa, + 0xc3, 0x62, 0x94, 0x64, 0x1f, 0x1b, 0x21, 0xa9, 0xf5, 0xfd, 0x20, 0x14, 0x91, 0x8e, 0xcc, 0x47, + 0xbb, 0x96, 0x06, 0xe2, 0xc1, 0xfa, 0x69, 0x24, 0x3c, 0xd9, 0xfe, 0x38, 0x7b, 0x83, 0x6f, 0x00, + 0x09, 0xcf, 0xb8, 0x3f, 0x58, 0x5f, 0x45, 0x22, 0xdf, 0xec, 0x63, 0xb2, 0x23, 0x85, 0x24, 0x7e, + 0xd0, 0x6f, 0xb0, 0x3e, 0xb2, 0xe0, 0x29, 0x9f, 0x98, 0x5e, 0xb7, 0x4b, 0x5c, 0x8b, 0x67, 0x21, + 0x34, 0xfc, 0x8e, 0xed, 0x5e, 0xf3, 0x0d, 0x56, 0x51, 0x3c, 0x0c, 0xf8, 0xcc, 0xd1, 0x61, 0xf5, + 0x29, 0x7c, 0x4c, 0x3d, 0x7c, 0x2c, 0x16, 0xd4, 0x85, 0xf9, 0x3e, 0x4b, 0xbe, 0xe2, 0x37, 0xdd, + 0x90, 0xf8, 0x7b, 0x86, 0x53, 0xf0, 0x09, 0x09, 0xc6, 0xbb, 0x9b, 0x49, 0x54, 0x38, 0x8d, 0x1b, + 0x1d, 0x50, 0xb1, 0x20, 0xba, 0xa3, 0x90, 0x2c, 0x17, 0xcf, 0xb6, 0x86, 0x07, 0xd1, 0xe1, 0x2c, + 0x1a, 0xfa, 0xdf, 0xd7, 0x40, 0x1c, 0x8e, 0x50, 0xfb, 0x50, 0x31, 0x72, 0xcb, 0xef, 0xff, 0x53, + 0xc2, 0xf7, 0x40, 0x20, 0x60, 0x29, 0xae, 0x86, 0x4a, 0x90, 0x74, 0xf2, 0xdb, 0xb0, 0x71, 0x8a, + 0xa6, 0xb1, 0xdc, 0x14, 0x4d, 0xdf, 0xd5, 0x20, 0xfd, 0x94, 0x18, 0xb5, 0xba, 0xc5, 0x3d, 0x1a, + 0x11, 0x8d, 0xcf, 0xc3, 0xc5, 0x78, 0x11, 0x8e, 0x60, 0x49, 0x5d, 0x6d, 0x84, 0xb0, 0xfb, 0xec, + 0x10, 0xe8, 0xe3, 0x75, 0x35, 0xfd, 0x87, 0x73, 0x30, 0xc9, 0x6f, 0x7e, 0x50, 0xd9, 0x93, 0x11, + 0x61, 0x72, 0xb3, 0xf8, 0xdd, 0x92, 0x02, 0x81, 0x24, 0x89, 0x4c, 0x1c, 0xa5, 0x63, 0x33, 0x71, + 0x60, 0x9e, 0x27, 0x6d, 0x04, 0xfb, 0xbc, 0x8e, 0x9b, 0x22, 0x35, 0xb7, 0xc8, 0x91, 0x86, 0xc2, + 0x84, 0xe1, 0x3a, 0x5e, 0x3c, 0x03, 0x1c, 0x1f, 0x00, 0xc5, 0x7c, 0x9d, 0x3b, 0xc6, 0x74, 0x8d, + 0x43, 0xed, 0x27, 0x8a, 0x3b, 0x73, 0xc5, 0x90, 0x0f, 0x11, 0x6a, 0x2f, 0x39, 0x7e, 0x32, 0x97, + 0xe3, 0xb7, 0x61, 0x4a, 0xc8, 0x0d, 0x21, 0xc4, 0x5e, 0x1d, 0x21, 0x07, 0x9a, 0x72, 0x37, 0x92, + 0x17, 0xe0, 0x08, 0x39, 0xdd, 0x19, 0xbb, 0xc6, 0xbe, 0xdd, 0xed, 0x77, 0x99, 0xe4, 0x9a, 0x50, + 0xab, 0xb2, 0x62, 0x1c, 0xc1, 0x59, 0x55, 0xee, 0x03, 0x67, 0xfa, 0x88, 0x5a, 0x55, 0x3c, 0x2d, + 0x11, 0xc1, 0xd1, 0xdb, 0x50, 0xee, 0x1a, 0xfb, 0xed, 0xbe, 0xdf, 0x21, 0xc2, 0x64, 0xcd, 0x77, + 0xc4, 0xf6, 0x43, 0xdb, 0x59, 0xa1, 0x5a, 0x74, 0xe8, 0xaf, 0x34, 0xdd, 0xf0, 0xb6, 0xdf, 0x0e, + 0x7d, 0x99, 0xee, 0x69, 0x5d, 0x60, 0xc1, 0x12, 0x1f, 0x72, 0x60, 0xae, 0x6b, 0xec, 0x6f, 0xba, + 0x86, 0xcc, 0x80, 0x5f, 0x29, 0x48, 0x81, 0xf9, 0xed, 0xd6, 0x13, 0xb8, 0x70, 0x0a, 0x77, 0x86, + 0x8b, 0x70, 0xe6, 0xbc, 0x5c, 0x84, 0xab, 0xf2, 0xa8, 0x7e, 0x96, 0x31, 0xe1, 0x13, 0x59, 0xde, + 0xf2, 0xe3, 0x8f, 0xe1, 0xdf, 0x91, 0xc7, 0xf0, 0x73, 0xc5, 0xfd, 0x7a, 0xc7, 0x1c, 0xc1, 0xf7, + 0xa1, 0x62, 0xc9, 0xa7, 0xec, 0x83, 0xa5, 0xf9, 0xe2, 0x76, 0x65, 0xfc, 0x22, 0xbe, 0x92, 0x9f, + 0x36, 0x46, 0x8d, 0x55, 0x3a, 0xe8, 0x36, 0xcf, 0x90, 0xee, 0x90, 0x30, 0xae, 0xc2, 0xf4, 0xe2, + 0x05, 0xee, 0x3a, 0x88, 0x12, 0x9a, 0x0f, 0x54, 0xc0, 0xd9, 0xed, 0xa8, 0x2a, 0xc9, 0xe3, 0x83, + 0x17, 0xe3, 0x2b, 0xd9, 0x89, 0xbb, 0x37, 0xbf, 0xa4, 0xc1, 0x02, 0x7f, 0xb9, 0xa6, 0xee, 0x75, + 0x7b, 0x9e, 0x4b, 0xe8, 0xb4, 0x20, 0x36, 0xa6, 0x9f, 0x2f, 0x2e, 0x1b, 0xda, 0x29, 0x8c, 0xe2, + 0x98, 0x23, 0x55, 0x8a, 0x07, 0x28, 0xa3, 0xdf, 0xd3, 0x60, 0xa9, 0x9b, 0x93, 0xfe, 0x75, 0xe9, + 0x42, 0xf1, 0x78, 0xa3, 0x93, 0x52, 0xca, 0xf2, 0xd7, 0xb4, 0x4e, 0xaa, 0x85, 0x73, 0xfb, 0x36, + 0x6a, 0x54, 0xe2, 0x28, 0x77, 0x92, 0x7e, 0x1e, 0x16, 0xd2, 0x7b, 0x80, 0x9a, 0xd3, 0x5d, 0x3b, + 0xd7, 0x9c, 0xee, 0xfa, 0x6b, 0x70, 0x39, 0x7b, 0xce, 0xa9, 0x4a, 0xc4, 0x1e, 0x47, 0x10, 0x96, + 0x48, 
0x9c, 0xb1, 0x8c, 0x16, 0x62, 0x0e, 0xab, 0x7d, 0xe2, 0xfb, 0x3f, 0xb9, 0xf2, 0xd8, 0x0f, + 0x7e, 0x72, 0xe5, 0xb1, 0x1f, 0xfd, 0xe4, 0xca, 0x63, 0x5f, 0x3d, 0xba, 0xa2, 0x7d, 0xff, 0xe8, + 0x8a, 0xf6, 0x83, 0xa3, 0x2b, 0xda, 0x8f, 0x8e, 0xae, 0x68, 0xff, 0xe3, 0xe8, 0x8a, 0xf6, 0x2b, + 0xff, 0xf3, 0xca, 0x63, 0x6f, 0x4f, 0x89, 0x1e, 0xfd, 0x79, 0x00, 0x00, 0x00, 0xff, 0xff, 0xbd, + 0xc7, 0x67, 0x56, 0x52, 0xab, 0x00, 0x00, } func (m *Addon) Marshal() (dAtA []byte, err error) { @@ -4873,6 +5621,15 @@ func (m *Condition) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.Codes) > 0 { + for iNdEx := len(m.Codes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Codes[iNdEx]) + copy(dAtA[i:], m.Codes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Codes[iNdEx]))) + i-- + dAtA[i] = 0x3a + } + } i -= len(m.Message) copy(dAtA[i:], m.Message) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) @@ -4976,6 +5733,25 @@ func (m *ControllerDeployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.SeedSelector != nil { + { + size, err := m.SeedSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Policy != nil { + i -= len(*m.Policy) + copy(dAtA[i:], *m.Policy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Policy))) + i-- + dAtA[i] = 0x1a + } if m.ProviderConfig != nil { { size, err := m.ProviderConfig.MarshalToSizedBuffer(dAtA[:i]) @@ -5347,6 +6123,16 @@ func (m *ControllerResource) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Primary != nil { + i-- + if *m.Primary { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } if m.ReconcileTimeout != nil { { size, err := m.ReconcileTimeout.MarshalToSizedBuffer(dAtA[:i]) @@ -5538,6 +6324,56 @@ func (m *DNSProvider) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *DataVolume) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DataVolume) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DataVolume) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Encrypted != nil { + i-- + if *m.Encrypted { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + i -= len(m.VolumeSize) + copy(dAtA[i:], m.VolumeSize) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeSize))) + i-- + dAtA[i] = 0x1a + if m.Type != nil { + i -= len(*m.Type) + copy(dAtA[i:], *m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Type))) + i-- + dAtA[i] = 0x12 + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *Endpoint) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -5643,6 +6479,16 @@ func (m *Extension) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Disabled != nil { + i-- + if *m.Disabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } if m.ProviderConfig != nil { { size, err := m.ProviderConfig.MarshalToSizedBuffer(dAtA[:i]) @@ -5893,6 +6739,84 @@ func (m *HorizontalPodAutoscalerConfig) 
MarshalToSizedBuffer(dAtA []byte) (int, return len(dAtA) - i, nil } +func (m *Ingress) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Ingress) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Ingress) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Controller.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Domain) + copy(dAtA[i:], m.Domain) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Domain))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *IngressController) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IngressController) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressController) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ProviderConfig != nil { + { + size, err := m.ProviderConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *KubeAPIServerConfig) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -5913,6 +6837,30 @@ func (m *KubeAPIServerConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Requests != nil { + { + size, err := m.Requests.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + if m.WatchCacheSizes != nil { + { + size, err := m.WatchCacheSizes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } if m.ServiceAccountConfig != nil { { size, err := m.ServiceAccountConfig.MarshalToSizedBuffer(dAtA[:i]) @@ -6022,6 +6970,39 @@ func (m *KubeAPIServerConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *KubeAPIServerRequests) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KubeAPIServerRequests) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *KubeAPIServerRequests) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.MaxMutatingInflight != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxMutatingInflight)) + i-- + dAtA[i] = 0x10 + } + if m.MaxNonMutatingInflight != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxNonMutatingInflight)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + func (m *KubeControllerManagerConfig) Marshal() (dAtA []byte, err error) { size 
:= m.Size() dAtA = make([]byte, size) @@ -6042,6 +7023,18 @@ func (m *KubeControllerManagerConfig) MarshalToSizedBuffer(dAtA []byte) (int, er _ = i var l int _ = l + if m.PodEvictionTimeout != nil { + { + size, err := m.PodEvictionTimeout.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } if m.NodeCIDRMaskSize != nil { i = encodeVarintGenerated(dAtA, i, uint64(*m.NodeCIDRMaskSize)) i-- @@ -6132,6 +7125,13 @@ func (m *KubeSchedulerConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.KubeMaxPDVols != nil { + i -= len(*m.KubeMaxPDVols) + copy(dAtA[i:], *m.KubeMaxPDVols) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.KubeMaxPDVols))) + i-- + dAtA[i] = 0x12 + } { size, err := m.KubernetesConfig.MarshalToSizedBuffer(dAtA[:i]) if err != nil { @@ -6165,6 +7165,40 @@ func (m *KubeletConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.SystemReserved != nil { + { + size, err := m.SystemReserved.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x7a + } + if m.KubeReserved != nil { + { + size, err := m.KubeReserved.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x72 + } + if m.FailSwapOn != nil { + i-- + if *m.FailSwapOn { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x68 + } if m.ImagePullProgressDeadline != nil { { size, err := m.ImagePullProgressDeadline.MarshalToSizedBuffer(dAtA[:i]) @@ -6506,6 +7540,77 @@ func (m *KubeletConfigEvictionSoftGracePeriod) MarshalToSizedBuffer(dAtA []byte) return len(dAtA) - i, nil } +func (m *KubeletConfigReserved) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KubeletConfigReserved) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *KubeletConfigReserved) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.PID != nil { + { + size, err := m.PID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.EphemeralStorage != nil { + { + size, err := m.EphemeralStorage.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Memory != nil { + { + size, err := m.Memory.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.CPU != nil { + { + size, err := m.CPU.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *Kubernetes) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -6526,6 +7631,18 @@ func (m *Kubernetes) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.VerticalPodAutoscaler != nil { + { + size, err := 
m.VerticalPodAutoscaler.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } i -= len(m.Version) copy(dAtA[i:], m.Version) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) @@ -6918,6 +8035,79 @@ func (m *Machine) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *MachineControllerManagerSettings) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MachineControllerManagerSettings) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MachineControllerManagerSettings) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.NodeConditions) > 0 { + for iNdEx := len(m.NodeConditions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.NodeConditions[iNdEx]) + copy(dAtA[i:], m.NodeConditions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeConditions[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + if m.MaxEvictRetries != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxEvictRetries)) + i-- + dAtA[i] = 0x20 + } + if m.MachineCreationTimeout != nil { + { + size, err := m.MachineCreationTimeout.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.MachineHealthTimeout != nil { + { + size, err := m.MachineHealthTimeout.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.MachineDrainTimeout != nil { + { + size, err := m.MachineDrainTimeout.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *MachineImage) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -6960,6 +8150,53 @@ func (m *MachineImage) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *MachineImageVersion) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MachineImageVersion) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MachineImageVersion) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.CRI) > 0 { + for iNdEx := len(m.CRI) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.CRI[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ExpirableVersion.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *MachineType) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -7103,6 +8340,16 @@ func (m *Maintenance) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if 
m.ConfineSpecUpdateRollout != nil { + i-- + if *m.ConfineSpecUpdateRollout { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } if m.TimeWindow != nil { { size, err := m.TimeWindow.MarshalToSizedBuffer(dAtA[:i]) @@ -7237,6 +8484,44 @@ func (m *Monitoring) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *NamedResourceReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NamedResourceReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NamedResourceReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ResourceRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *Networking) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -7910,6 +9195,18 @@ func (m *ProjectSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Tolerations != nil { + { + size, err := m.Tolerations.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } if m.Namespace != nil { i -= len(*m.Namespace) copy(dAtA[i:], *m.Namespace) @@ -7992,6 +9289,30 @@ func (m *ProjectStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.StaleAutoDeleteTimestamp != nil { + { + size, err := m.StaleAutoDeleteTimestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.StaleSinceTimestamp != nil { + { + size, err := m.StaleSinceTimestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } i -= len(m.Phase) copy(dAtA[i:], m.Phase) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) @@ -8003,6 +9324,57 @@ func (m *ProjectStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *ProjectTolerations) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProjectTolerations) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ProjectTolerations) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Whitelist) > 0 { + for iNdEx := len(m.Whitelist) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Whitelist[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Defaults) > 0 { + for iNdEx := len(m.Defaults) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Defaults[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + func (m *Provider) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -8069,39 +9441,6 @@ func (m *Provider) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *ProviderConfig) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ProviderConfig) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ProviderConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.RawExtension.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - func (m *Quota) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -8279,6 +9618,30 @@ func (m *Region) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.Labels) > 0 { + keysForLabels := make([]string, 0, len(m.Labels)) + for k := range m.Labels { + keysForLabels = append(keysForLabels, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + for iNdEx := len(keysForLabels) - 1; iNdEx >= 0; iNdEx-- { + v := m.Labels[string(keysForLabels[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForLabels[iNdEx]) + copy(dAtA[i:], keysForLabels[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForLabels[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a + } + } if len(m.Zones) > 0 { for iNdEx := len(m.Zones) - 1; iNdEx >= 0; iNdEx-- { { @@ -8301,6 +9664,44 @@ func (m *Region) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *ResourceWatchCacheSize) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceWatchCacheSize) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceWatchCacheSize) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.CacheSize)) + i-- + dAtA[i] = 0x18 + i -= len(m.Resource) + copy(dAtA[i:], m.Resource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + i-- + dAtA[i] = 0x12 + if m.APIGroup != nil { + i -= len(*m.APIGroup) + copy(dAtA[i:], *m.APIGroup) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.APIGroup))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *SecretBinding) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -8535,9 +9936,85 @@ func (m *SeedDNS) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - i -= len(m.IngressDomain) - copy(dAtA[i:], m.IngressDomain) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.IngressDomain))) + if m.Provider != nil { + { + size, err := m.Provider.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.IngressDomain != nil { + i -= len(*m.IngressDomain) + copy(dAtA[i:], *m.IngressDomain) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.IngressDomain))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SeedDNSProvider) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SeedDNSProvider) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SeedDNSProvider) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Zones != nil { + { + size, err := m.Zones.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Domains != nil { + { + size, err := m.Domains.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) i-- dAtA[i] = 0xa return len(dAtA) - i, nil @@ -8696,6 +10173,304 @@ func (m *SeedProvider) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *SeedSelector) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SeedSelector) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SeedSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ProviderTypes) > 0 { + for iNdEx := len(m.ProviderTypes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ProviderTypes[iNdEx]) + copy(dAtA[i:], m.ProviderTypes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ProviderTypes[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if m.LabelSelector != nil { + { + size, err := m.LabelSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SeedSettingExcessCapacityReservation) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SeedSettingExcessCapacityReservation) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SeedSettingExcessCapacityReservation) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if m.Enabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *SeedSettingLoadBalancerServices) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err 
+ } + return dAtA[:n], nil +} + +func (m *SeedSettingLoadBalancerServices) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SeedSettingLoadBalancerServices) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Annotations) > 0 { + keysForAnnotations := make([]string, 0, len(m.Annotations)) + for k := range m.Annotations { + keysForAnnotations = append(keysForAnnotations, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + for iNdEx := len(keysForAnnotations) - 1; iNdEx >= 0; iNdEx-- { + v := m.Annotations[string(keysForAnnotations[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForAnnotations[iNdEx]) + copy(dAtA[i:], keysForAnnotations[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAnnotations[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *SeedSettingScheduling) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SeedSettingScheduling) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SeedSettingScheduling) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if m.Visible { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *SeedSettingShootDNS) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SeedSettingShootDNS) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SeedSettingShootDNS) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if m.Enabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *SeedSettingVerticalPodAutoscaler) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SeedSettingVerticalPodAutoscaler) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SeedSettingVerticalPodAutoscaler) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if m.Enabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *SeedSettings) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SeedSettings) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SeedSettings) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.VerticalPodAutoscaler != nil { + { + size, err := 
m.VerticalPodAutoscaler.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.LoadBalancerServices != nil { + { + size, err := m.LoadBalancerServices.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.ShootDNS != nil { + { + size, err := m.ShootDNS.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Scheduling != nil { + { + size, err := m.Scheduling.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.ExcessCapacityReservation != nil { + { + size, err := m.ExcessCapacityReservation.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *SeedSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -8716,6 +10491,30 @@ func (m *SeedSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Ingress != nil { + { + size, err := m.Ingress.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + if m.Settings != nil { + { + size, err := m.Settings.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } if m.Volume != nil { { size, err := m.Volume.MarshalToSizedBuffer(dAtA[:i]) @@ -8819,6 +10618,71 @@ func (m *SeedStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.Allocatable) > 0 { + keysForAllocatable := make([]string, 0, len(m.Allocatable)) + for k := range m.Allocatable { + keysForAllocatable = append(keysForAllocatable, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAllocatable) + for iNdEx := len(keysForAllocatable) - 1; iNdEx >= 0; iNdEx-- { + v := m.Allocatable[k8s_io_api_core_v1.ResourceName(keysForAllocatable[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForAllocatable[iNdEx]) + copy(dAtA[i:], keysForAllocatable[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAllocatable[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x3a + } + } + if len(m.Capacity) > 0 { + keysForCapacity := make([]string, 0, len(m.Capacity)) + for k := range m.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + for iNdEx := len(keysForCapacity) - 1; iNdEx >= 0; iNdEx-- { + v := m.Capacity[k8s_io_api_core_v1.ResourceName(keysForCapacity[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForCapacity[iNdEx]) + copy(dAtA[i:], keysForCapacity[iNdEx]) + i = encodeVarintGenerated(dAtA, i, 
uint64(len(keysForCapacity[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x32 + } + } + if m.ClusterIdentity != nil { + i -= len(*m.ClusterIdentity) + copy(dAtA[i:], *m.ClusterIdentity) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ClusterIdentity))) + i-- + dAtA[i] = 0x2a + } i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) i-- dAtA[i] = 0x20 @@ -8893,6 +10757,49 @@ func (m *SeedTaint) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *SeedTemplate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SeedTemplate) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SeedTemplate) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *SeedVolume) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -9221,6 +11128,50 @@ func (m *ShootSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.Tolerations) > 0 { + for iNdEx := len(m.Tolerations) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Tolerations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + } + if len(m.Resources) > 0 { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Resources[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + } + if m.SeedSelector != nil { + { + size, err := m.SeedSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x7a + } if m.SeedName != nil { i -= len(*m.SeedName) copy(dAtA[i:], *m.SeedName) @@ -9377,6 +11328,13 @@ func (m *ShootStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.ClusterIdentity != nil { + i -= len(*m.ClusterIdentity) + copy(dAtA[i:], *m.ClusterIdentity) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ClusterIdentity))) + i-- + dAtA[i] = 0x62 + } i -= len(m.UID) copy(dAtA[i:], m.UID) i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) @@ -9484,6 +11442,131 @@ func (m *ShootStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *Toleration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Toleration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Toleration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i 
+ var l int + _ = l + if m.Value != nil { + i -= len(*m.Value) + copy(dAtA[i:], *m.Value) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Value))) + i-- + dAtA[i] = 0x12 + } + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *VerticalPodAutoscaler) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VerticalPodAutoscaler) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VerticalPodAutoscaler) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.RecommenderInterval != nil { + { + size, err := m.RecommenderInterval.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + if m.UpdaterInterval != nil { + { + size, err := m.UpdaterInterval.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + if m.RecommendationMarginFraction != nil { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.RecommendationMarginFraction)))) + i-- + dAtA[i] = 0x31 + } + if m.EvictionTolerance != nil { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.EvictionTolerance)))) + i-- + dAtA[i] = 0x29 + } + if m.EvictionRateLimit != nil { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.EvictionRateLimit)))) + i-- + dAtA[i] = 0x21 + } + if m.EvictionRateBurst != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.EvictionRateBurst)) + i-- + dAtA[i] = 0x18 + } + if m.EvictAfterOOMThreshold != nil { + { + size, err := m.EvictAfterOOMThreshold.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i-- + if m.Enabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + func (m *Volume) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -9579,6 +11662,48 @@ func (m *VolumeType) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *WatchCacheSizes) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WatchCacheSizes) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WatchCacheSizes) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Resources) > 0 { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Resources[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if m.Default != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Default)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + func (m *Worker) Marshal() (dAtA []byte, err error) { size := 
m.Size() dAtA = make([]byte, size) @@ -9599,6 +11724,34 @@ func (m *Worker) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.MachineControllerManagerSettings != nil { + { + size, err := m.MachineControllerManagerSettings.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a + } + if m.SystemComponents != nil { + { + size, err := m.SystemComponents.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } if len(m.Zones) > 0 { for iNdEx := len(m.Zones) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.Zones[iNdEx]) @@ -9833,6 +11986,37 @@ func (m *WorkerKubernetes) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *WorkerSystemComponents) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkerSystemComponents) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WorkerSystemComponents) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if m.Allow { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { offset -= sovGenerated(v) base := offset @@ -10279,6 +12463,12 @@ func (m *Condition) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.Message) n += 1 + l + sovGenerated(uint64(l)) + if len(m.Codes) > 0 { + for _, s := range m.Codes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -10309,6 +12499,14 @@ func (m *ControllerDeployment) Size() (n int) { l = m.ProviderConfig.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.Policy != nil { + l = len(*m.Policy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.SeedSelector != nil { + l = m.SeedSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -10442,6 +12640,9 @@ func (m *ControllerResource) Size() (n int) { l = m.ReconcileTimeout.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.Primary != nil { + n += 2 + } return n } @@ -10513,6 +12714,26 @@ func (m *DNSProvider) Size() (n int) { return n } +func (m *DataVolume) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.Type != nil { + l = len(*m.Type) + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.VolumeSize) + n += 1 + l + sovGenerated(uint64(l)) + if m.Encrypted != nil { + n += 2 + } + return n +} + func (m *Endpoint) Size() (n int) { if m == nil { return 0 @@ -10559,6 +12780,9 @@ func (m *Extension) Size() (n int) { l = m.ProviderConfig.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.Disabled != nil { + n += 2 + } return n } @@ -10652,6 +12876,34 @@ func (m *HorizontalPodAutoscalerConfig) Size() (n int) { return n } +func (m *Ingress) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Domain) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Controller.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *IngressController) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Kind) + n 
+= 1 + l + sovGenerated(uint64(l)) + if m.ProviderConfig != nil { + l = m.ProviderConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + func (m *KubeAPIServerConfig) Size() (n int) { if m == nil { return 0 @@ -10695,6 +12947,29 @@ func (m *KubeAPIServerConfig) Size() (n int) { l = m.ServiceAccountConfig.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.WatchCacheSizes != nil { + l = m.WatchCacheSizes.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Requests != nil { + l = m.Requests.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *KubeAPIServerRequests) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MaxNonMutatingInflight != nil { + n += 1 + sovGenerated(uint64(*m.MaxNonMutatingInflight)) + } + if m.MaxMutatingInflight != nil { + n += 1 + sovGenerated(uint64(*m.MaxMutatingInflight)) + } return n } @@ -10713,6 +12988,10 @@ func (m *KubeControllerManagerConfig) Size() (n int) { if m.NodeCIDRMaskSize != nil { n += 1 + sovGenerated(uint64(*m.NodeCIDRMaskSize)) } + if m.PodEvictionTimeout != nil { + l = m.PodEvictionTimeout.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -10739,6 +13018,10 @@ func (m *KubeSchedulerConfig) Size() (n int) { _ = l l = m.KubernetesConfig.Size() n += 1 + l + sovGenerated(uint64(l)) + if m.KubeMaxPDVols != nil { + l = len(*m.KubeMaxPDVols) + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -10790,6 +13073,17 @@ func (m *KubeletConfig) Size() (n int) { l = m.ImagePullProgressDeadline.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.FailSwapOn != nil { + n += 2 + } + if m.KubeReserved != nil { + l = m.KubeReserved.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.SystemReserved != nil { + l = m.SystemReserved.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -10880,6 +13174,31 @@ func (m *KubeletConfigEvictionSoftGracePeriod) Size() (n int) { return n } +func (m *KubeletConfigReserved) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CPU != nil { + l = m.CPU.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Memory != nil { + l = m.Memory.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.EphemeralStorage != nil { + l = m.EphemeralStorage.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.PID != nil { + l = m.PID.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + func (m *Kubernetes) Size() (n int) { if m == nil { return 0 @@ -10915,6 +13234,10 @@ func (m *Kubernetes) Size() (n int) { } l = len(m.Version) n += 1 + l + sovGenerated(uint64(l)) + if m.VerticalPodAutoscaler != nil { + l = m.VerticalPodAutoscaler.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -11034,6 +13357,36 @@ func (m *Machine) Size() (n int) { return n } +func (m *MachineControllerManagerSettings) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MachineDrainTimeout != nil { + l = m.MachineDrainTimeout.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.MachineHealthTimeout != nil { + l = m.MachineHealthTimeout.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.MachineCreationTimeout != nil { + l = m.MachineCreationTimeout.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.MaxEvictRetries != nil { + n += 1 + sovGenerated(uint64(*m.MaxEvictRetries)) + } + if len(m.NodeConditions) > 0 { + for _, s := range m.NodeConditions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + func (m *MachineImage) Size() (n int) { if m == nil { return 
0 @@ -11051,6 +13404,23 @@ func (m *MachineImage) Size() (n int) { return n } +func (m *MachineImageVersion) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ExpirableVersion.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.CRI) > 0 { + for _, e := range m.CRI { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + func (m *MachineType) Size() (n int) { if m == nil { return 0 @@ -11104,6 +13474,9 @@ func (m *Maintenance) Size() (n int) { l = m.TimeWindow.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.ConfineSpecUpdateRollout != nil { + n += 2 + } return n } @@ -11144,6 +13517,19 @@ func (m *Monitoring) Size() (n int) { return n } +func (m *NamedResourceReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.ResourceRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *Networking) Size() (n int) { if m == nil { return 0 @@ -11430,6 +13816,10 @@ func (m *ProjectSpec) Size() (n int) { l = len(*m.Namespace) n += 1 + l + sovGenerated(uint64(l)) } + if m.Tolerations != nil { + l = m.Tolerations.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -11442,6 +13832,35 @@ func (m *ProjectStatus) Size() (n int) { n += 1 + sovGenerated(uint64(m.ObservedGeneration)) l = len(m.Phase) n += 1 + l + sovGenerated(uint64(l)) + if m.StaleSinceTimestamp != nil { + l = m.StaleSinceTimestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.StaleAutoDeleteTimestamp != nil { + l = m.StaleAutoDeleteTimestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ProjectTolerations) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Defaults) > 0 { + for _, e := range m.Defaults { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Whitelist) > 0 { + for _, e := range m.Whitelist { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -11470,17 +13889,6 @@ func (m *Provider) Size() (n int) { return n } -func (m *ProviderConfig) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.RawExtension.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - func (m *Quota) Size() (n int) { if m == nil { return 0 @@ -11548,6 +13956,30 @@ func (m *Region) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *ResourceWatchCacheSize) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.APIGroup != nil { + l = len(*m.APIGroup) + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Resource) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.CacheSize)) return n } @@ -11629,8 +14061,35 @@ func (m *SeedDNS) Size() (n int) { } var l int _ = l - l = len(m.IngressDomain) + if m.IngressDomain != nil { + l = len(*m.IngressDomain) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Provider != nil { + l = m.Provider.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *SeedDNSProvider) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = m.SecretRef.Size() n += 1 + l + sovGenerated(uint64(l)) + if m.Domains != nil { + l = 
m.Domains.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Zones != nil { + l = m.Zones.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -11695,6 +14154,111 @@ func (m *SeedProvider) Size() (n int) { return n } +func (m *SeedSelector) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LabelSelector != nil { + l = m.LabelSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.ProviderTypes) > 0 { + for _, s := range m.ProviderTypes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *SeedSettingExcessCapacityReservation) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + return n +} + +func (m *SeedSettingLoadBalancerServices) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Annotations) > 0 { + for k, v := range m.Annotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *SeedSettingScheduling) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + return n +} + +func (m *SeedSettingShootDNS) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + return n +} + +func (m *SeedSettingVerticalPodAutoscaler) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + return n +} + +func (m *SeedSettings) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ExcessCapacityReservation != nil { + l = m.ExcessCapacityReservation.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Scheduling != nil { + l = m.Scheduling.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ShootDNS != nil { + l = m.ShootDNS.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.LoadBalancerServices != nil { + l = m.LoadBalancerServices.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.VerticalPodAutoscaler != nil { + l = m.VerticalPodAutoscaler.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + func (m *SeedSpec) Size() (n int) { if m == nil { return 0 @@ -11725,6 +14289,14 @@ func (m *SeedSpec) Size() (n int) { l = m.Volume.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.Settings != nil { + l = m.Settings.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Ingress != nil { + l = m.Ingress.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -11749,6 +14321,28 @@ func (m *SeedStatus) Size() (n int) { } } n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + if m.ClusterIdentity != nil { + l = len(*m.ClusterIdentity) + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Capacity) > 0 { + for k, v := range m.Capacity { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Allocatable) > 0 { + for k, v := range m.Allocatable { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } return n } @@ -11767,6 +14361,19 @@ func (m *SeedTaint) Size() (n int) { return n } +func (m *SeedTemplate) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + 
return n +} + func (m *SeedVolume) Size() (n int) { if m == nil { return 0 @@ -11936,6 +14543,22 @@ func (m *ShootSpec) Size() (n int) { l = len(*m.SeedName) n += 1 + l + sovGenerated(uint64(l)) } + if m.SeedSelector != nil { + l = m.SeedSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Resources) > 0 { + for _, e := range m.Resources { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if len(m.Tolerations) > 0 { + for _, e := range m.Tolerations { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } return n } @@ -11983,6 +14606,59 @@ func (m *ShootStatus) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.UID) n += 1 + l + sovGenerated(uint64(l)) + if m.ClusterIdentity != nil { + l = len(*m.ClusterIdentity) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *Toleration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + if m.Value != nil { + l = len(*m.Value) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *VerticalPodAutoscaler) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + if m.EvictAfterOOMThreshold != nil { + l = m.EvictAfterOOMThreshold.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.EvictionRateBurst != nil { + n += 1 + sovGenerated(uint64(*m.EvictionRateBurst)) + } + if m.EvictionRateLimit != nil { + n += 9 + } + if m.EvictionTolerance != nil { + n += 9 + } + if m.RecommendationMarginFraction != nil { + n += 9 + } + if m.UpdaterInterval != nil { + l = m.UpdaterInterval.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.RecommenderInterval != nil { + l = m.RecommenderInterval.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -12024,6 +14700,24 @@ func (m *VolumeType) Size() (n int) { return n } +func (m *WatchCacheSizes) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Default != nil { + n += 1 + sovGenerated(uint64(*m.Default)) + } + if len(m.Resources) > 0 { + for _, e := range m.Resources { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + func (m *Worker) Size() (n int) { if m == nil { return 0 @@ -12102,6 +14796,14 @@ func (m *Worker) Size() (n int) { n += 2 + l + sovGenerated(uint64(l)) } } + if m.SystemComponents != nil { + l = m.SystemComponents.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.MachineControllerManagerSettings != nil { + l = m.MachineControllerManagerSettings.Size() + n += 2 + l + sovGenerated(uint64(l)) + } return n } @@ -12118,6 +14820,16 @@ func (m *WorkerKubernetes) Size() (n int) { return n } +func (m *WorkerSystemComponents) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + return n +} + func sovGenerated(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } @@ -12151,7 +14863,7 @@ func (this *AdmissionPlugin) String() string { } s := strings.Join([]string{`&AdmissionPlugin{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Config:` + strings.Replace(this.Config.String(), "ProviderConfig", "ProviderConfig", 1) + `,`, + `Config:` + strings.Replace(fmt.Sprintf("%v", this.Config), "RawExtension", "runtime.RawExtension", 1) + `,`, `}`, }, "") return s @@ -12243,7 +14955,7 @@ func (this *BackupBucketSpec) String() string { } s := strings.Join([]string{`&BackupBucketSpec{`, `Provider:` + strings.Replace(strings.Replace(this.Provider.String(), "BackupBucketProvider", "BackupBucketProvider", 1), `&`, ``, 1) + `,`, - `ProviderConfig:` + 
strings.Replace(this.ProviderConfig.String(), "ProviderConfig", "ProviderConfig", 1) + `,`, + `ProviderConfig:` + strings.Replace(fmt.Sprintf("%v", this.ProviderConfig), "RawExtension", "runtime.RawExtension", 1) + `,`, `SecretRef:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretReference", "v1.SecretReference", 1), `&`, ``, 1) + `,`, `SeedName:` + valueToStringGenerated(this.SeedName) + `,`, `}`, @@ -12255,7 +14967,7 @@ func (this *BackupBucketStatus) String() string { return "nil" } s := strings.Join([]string{`&BackupBucketStatus{`, - `ProviderStatus:` + strings.Replace(this.ProviderStatus.String(), "ProviderConfig", "ProviderConfig", 1) + `,`, + `ProviderStatus:` + strings.Replace(fmt.Sprintf("%v", this.ProviderStatus), "RawExtension", "runtime.RawExtension", 1) + `,`, `LastOperation:` + strings.Replace(this.LastOperation.String(), "LastOperation", "LastOperation", 1) + `,`, `LastError:` + strings.Replace(this.LastError.String(), "LastError", "LastError", 1) + `,`, `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, @@ -12398,9 +15110,9 @@ func (this *CloudProfileSpec) String() string { `Kubernetes:` + strings.Replace(strings.Replace(this.Kubernetes.String(), "KubernetesSettings", "KubernetesSettings", 1), `&`, ``, 1) + `,`, `MachineImages:` + repeatedStringForMachineImages + `,`, `MachineTypes:` + repeatedStringForMachineTypes + `,`, - `ProviderConfig:` + strings.Replace(this.ProviderConfig.String(), "ProviderConfig", "ProviderConfig", 1) + `,`, + `ProviderConfig:` + strings.Replace(fmt.Sprintf("%v", this.ProviderConfig), "RawExtension", "runtime.RawExtension", 1) + `,`, `Regions:` + repeatedStringForRegions + `,`, - `SeedSelector:` + strings.Replace(fmt.Sprintf("%v", this.SeedSelector), "LabelSelector", "v11.LabelSelector", 1) + `,`, + `SeedSelector:` + strings.Replace(this.SeedSelector.String(), "SeedSelector", "SeedSelector", 1) + `,`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `VolumeTypes:` + repeatedStringForVolumeTypes + `,`, `}`, @@ -12444,6 +15156,7 @@ func (this *Condition) String() string { `LastUpdateTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastUpdateTime), "Time", "v11.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Codes:` + fmt.Sprintf("%v", this.Codes) + `,`, `}`, }, "") return s @@ -12454,7 +15167,7 @@ func (this *ContainerRuntime) String() string { } s := strings.Join([]string{`&ContainerRuntime{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `ProviderConfig:` + strings.Replace(this.ProviderConfig.String(), "ProviderConfig", "ProviderConfig", 1) + `,`, + `ProviderConfig:` + strings.Replace(fmt.Sprintf("%v", this.ProviderConfig), "RawExtension", "runtime.RawExtension", 1) + `,`, `}`, }, "") return s @@ -12465,7 +15178,9 @@ func (this *ControllerDeployment) String() string { } s := strings.Join([]string{`&ControllerDeployment{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `ProviderConfig:` + strings.Replace(this.ProviderConfig.String(), "ProviderConfig", "ProviderConfig", 1) + `,`, + `ProviderConfig:` + strings.Replace(fmt.Sprintf("%v", this.ProviderConfig), "RawExtension", "runtime.RawExtension", 1) + `,`, + `Policy:` + valueToStringGenerated(this.Policy) + `,`, + `SeedSelector:` + strings.Replace(fmt.Sprintf("%v", this.SeedSelector), "LabelSelector", "v11.LabelSelector", 1) + `,`, `}`, }, "") return s @@ -12520,7 +15235,7 @@ func (this *ControllerInstallationStatus) String() string { 
repeatedStringForConditions += "}" s := strings.Join([]string{`&ControllerInstallationStatus{`, `Conditions:` + repeatedStringForConditions + `,`, - `ProviderStatus:` + strings.Replace(this.ProviderStatus.String(), "ProviderConfig", "ProviderConfig", 1) + `,`, + `ProviderStatus:` + strings.Replace(fmt.Sprintf("%v", this.ProviderStatus), "RawExtension", "runtime.RawExtension", 1) + `,`, `}`, }, "") return s @@ -12577,6 +15292,7 @@ func (this *ControllerResource) String() string { `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `GloballyEnabled:` + valueToStringGenerated(this.GloballyEnabled) + `,`, `ReconcileTimeout:` + strings.Replace(fmt.Sprintf("%v", this.ReconcileTimeout), "Duration", "v11.Duration", 1) + `,`, + `Primary:` + valueToStringGenerated(this.Primary) + `,`, `}`, }, "") return s @@ -12622,6 +15338,19 @@ func (this *DNSProvider) String() string { }, "") return s } +func (this *DataVolume) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DataVolume{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Type:` + valueToStringGenerated(this.Type) + `,`, + `VolumeSize:` + fmt.Sprintf("%v", this.VolumeSize) + `,`, + `Encrypted:` + valueToStringGenerated(this.Encrypted) + `,`, + `}`, + }, "") + return s +} func (this *Endpoint) String() string { if this == nil { return "nil" @@ -12652,7 +15381,8 @@ func (this *Extension) String() string { } s := strings.Join([]string{`&Extension{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `ProviderConfig:` + strings.Replace(this.ProviderConfig.String(), "ProviderConfig", "ProviderConfig", 1) + `,`, + `ProviderConfig:` + strings.Replace(fmt.Sprintf("%v", this.ProviderConfig), "RawExtension", "runtime.RawExtension", 1) + `,`, + `Disabled:` + valueToStringGenerated(this.Disabled) + `,`, `}`, }, "") return s @@ -12713,6 +15443,28 @@ func (this *HorizontalPodAutoscalerConfig) String() string { }, "") return s } +func (this *Ingress) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Ingress{`, + `Domain:` + fmt.Sprintf("%v", this.Domain) + `,`, + `Controller:` + strings.Replace(strings.Replace(this.Controller.String(), "IngressController", "IngressController", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *IngressController) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IngressController{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `ProviderConfig:` + strings.Replace(fmt.Sprintf("%v", this.ProviderConfig), "RawExtension", "runtime.RawExtension", 1) + `,`, + `}`, + }, "") + return s +} func (this *KubeAPIServerConfig) String() string { if this == nil { return "nil" @@ -12741,6 +15493,19 @@ func (this *KubeAPIServerConfig) String() string { `OIDCConfig:` + strings.Replace(this.OIDCConfig.String(), "OIDCConfig", "OIDCConfig", 1) + `,`, `RuntimeConfig:` + mapStringForRuntimeConfig + `,`, `ServiceAccountConfig:` + strings.Replace(this.ServiceAccountConfig.String(), "ServiceAccountConfig", "ServiceAccountConfig", 1) + `,`, + `WatchCacheSizes:` + strings.Replace(this.WatchCacheSizes.String(), "WatchCacheSizes", "WatchCacheSizes", 1) + `,`, + `Requests:` + strings.Replace(this.Requests.String(), "KubeAPIServerRequests", "KubeAPIServerRequests", 1) + `,`, + `}`, + }, "") + return s +} +func (this *KubeAPIServerRequests) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&KubeAPIServerRequests{`, + `MaxNonMutatingInflight:` + valueToStringGenerated(this.MaxNonMutatingInflight) + `,`, + 
`MaxMutatingInflight:` + valueToStringGenerated(this.MaxMutatingInflight) + `,`, `}`, }, "") return s @@ -12753,6 +15518,7 @@ func (this *KubeControllerManagerConfig) String() string { `KubernetesConfig:` + strings.Replace(strings.Replace(this.KubernetesConfig.String(), "KubernetesConfig", "KubernetesConfig", 1), `&`, ``, 1) + `,`, `HorizontalPodAutoscalerConfig:` + strings.Replace(this.HorizontalPodAutoscalerConfig.String(), "HorizontalPodAutoscalerConfig", "HorizontalPodAutoscalerConfig", 1) + `,`, `NodeCIDRMaskSize:` + valueToStringGenerated(this.NodeCIDRMaskSize) + `,`, + `PodEvictionTimeout:` + strings.Replace(fmt.Sprintf("%v", this.PodEvictionTimeout), "Duration", "v11.Duration", 1) + `,`, `}`, }, "") return s @@ -12774,6 +15540,7 @@ func (this *KubeSchedulerConfig) String() string { } s := strings.Join([]string{`&KubeSchedulerConfig{`, `KubernetesConfig:` + strings.Replace(strings.Replace(this.KubernetesConfig.String(), "KubernetesConfig", "KubernetesConfig", 1), `&`, ``, 1) + `,`, + `KubeMaxPDVols:` + valueToStringGenerated(this.KubeMaxPDVols) + `,`, `}`, }, "") return s @@ -12795,6 +15562,9 @@ func (this *KubeletConfig) String() string { `MaxPods:` + valueToStringGenerated(this.MaxPods) + `,`, `PodPIDsLimit:` + valueToStringGenerated(this.PodPIDsLimit) + `,`, `ImagePullProgressDeadline:` + strings.Replace(fmt.Sprintf("%v", this.ImagePullProgressDeadline), "Duration", "v11.Duration", 1) + `,`, + `FailSwapOn:` + valueToStringGenerated(this.FailSwapOn) + `,`, + `KubeReserved:` + strings.Replace(this.KubeReserved.String(), "KubeletConfigReserved", "KubeletConfigReserved", 1) + `,`, + `SystemReserved:` + strings.Replace(this.SystemReserved.String(), "KubeletConfigReserved", "KubeletConfigReserved", 1) + `,`, `}`, }, "") return s @@ -12841,6 +15611,19 @@ func (this *KubeletConfigEvictionSoftGracePeriod) String() string { }, "") return s } +func (this *KubeletConfigReserved) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&KubeletConfigReserved{`, + `CPU:` + strings.Replace(fmt.Sprintf("%v", this.CPU), "Quantity", "resource.Quantity", 1) + `,`, + `Memory:` + strings.Replace(fmt.Sprintf("%v", this.Memory), "Quantity", "resource.Quantity", 1) + `,`, + `EphemeralStorage:` + strings.Replace(fmt.Sprintf("%v", this.EphemeralStorage), "Quantity", "resource.Quantity", 1) + `,`, + `PID:` + strings.Replace(fmt.Sprintf("%v", this.PID), "Quantity", "resource.Quantity", 1) + `,`, + `}`, + }, "") + return s +} func (this *Kubernetes) String() string { if this == nil { return "nil" @@ -12854,6 +15637,7 @@ func (this *Kubernetes) String() string { `KubeProxy:` + strings.Replace(this.KubeProxy.String(), "KubeProxyConfig", "KubeProxyConfig", 1) + `,`, `Kubelet:` + strings.Replace(this.Kubelet.String(), "KubeletConfig", "KubeletConfig", 1) + `,`, `Version:` + fmt.Sprintf("%v", this.Version) + `,`, + `VerticalPodAutoscaler:` + strings.Replace(this.VerticalPodAutoscaler.String(), "VerticalPodAutoscaler", "VerticalPodAutoscaler", 1) + `,`, `}`, }, "") return s @@ -12952,13 +15736,27 @@ func (this *Machine) String() string { }, "") return s } +func (this *MachineControllerManagerSettings) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MachineControllerManagerSettings{`, + `MachineDrainTimeout:` + strings.Replace(fmt.Sprintf("%v", this.MachineDrainTimeout), "Duration", "v11.Duration", 1) + `,`, + `MachineHealthTimeout:` + strings.Replace(fmt.Sprintf("%v", this.MachineHealthTimeout), "Duration", "v11.Duration", 1) + `,`, + 
`MachineCreationTimeout:` + strings.Replace(fmt.Sprintf("%v", this.MachineCreationTimeout), "Duration", "v11.Duration", 1) + `,`, + `MaxEvictRetries:` + valueToStringGenerated(this.MaxEvictRetries) + `,`, + `NodeConditions:` + fmt.Sprintf("%v", this.NodeConditions) + `,`, + `}`, + }, "") + return s +} func (this *MachineImage) String() string { if this == nil { return "nil" } - repeatedStringForVersions := "[]ExpirableVersion{" + repeatedStringForVersions := "[]MachineImageVersion{" for _, f := range this.Versions { - repeatedStringForVersions += strings.Replace(strings.Replace(f.String(), "ExpirableVersion", "ExpirableVersion", 1), `&`, ``, 1) + "," + repeatedStringForVersions += strings.Replace(strings.Replace(f.String(), "MachineImageVersion", "MachineImageVersion", 1), `&`, ``, 1) + "," } repeatedStringForVersions += "}" s := strings.Join([]string{`&MachineImage{`, @@ -12968,6 +15766,22 @@ func (this *MachineImage) String() string { }, "") return s } +func (this *MachineImageVersion) String() string { + if this == nil { + return "nil" + } + repeatedStringForCRI := "[]CRI{" + for _, f := range this.CRI { + repeatedStringForCRI += strings.Replace(strings.Replace(f.String(), "CRI", "CRI", 1), `&`, ``, 1) + "," + } + repeatedStringForCRI += "}" + s := strings.Join([]string{`&MachineImageVersion{`, + `ExpirableVersion:` + strings.Replace(strings.Replace(this.ExpirableVersion.String(), "ExpirableVersion", "ExpirableVersion", 1), `&`, ``, 1) + `,`, + `CRI:` + repeatedStringForCRI + `,`, + `}`, + }, "") + return s +} func (this *MachineType) String() string { if this == nil { return "nil" @@ -13002,6 +15816,7 @@ func (this *Maintenance) String() string { s := strings.Join([]string{`&Maintenance{`, `AutoUpdate:` + strings.Replace(this.AutoUpdate.String(), "MaintenanceAutoUpdate", "MaintenanceAutoUpdate", 1) + `,`, `TimeWindow:` + strings.Replace(this.TimeWindow.String(), "MaintenanceTimeWindow", "MaintenanceTimeWindow", 1) + `,`, + `ConfineSpecUpdateRollout:` + valueToStringGenerated(this.ConfineSpecUpdateRollout) + `,`, `}`, }, "") return s @@ -13038,13 +15853,24 @@ func (this *Monitoring) String() string { }, "") return s } +func (this *NamedResourceReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NamedResourceReference{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `ResourceRef:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ResourceRef), "CrossVersionObjectReference", "v12.CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} func (this *Networking) String() string { if this == nil { return "nil" } s := strings.Join([]string{`&Networking{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `ProviderConfig:` + strings.Replace(this.ProviderConfig.String(), "ProviderConfig", "ProviderConfig", 1) + `,`, + `ProviderConfig:` + strings.Replace(fmt.Sprintf("%v", this.ProviderConfig), "RawExtension", "runtime.RawExtension", 1) + `,`, `Pods:` + valueToStringGenerated(this.Pods) + `,`, `Nodes:` + valueToStringGenerated(this.Nodes) + `,`, `Services:` + valueToStringGenerated(this.Services) + `,`, @@ -13219,7 +16045,7 @@ func (this *ProjectMember) String() string { return "nil" } s := strings.Join([]string{`&ProjectMember{`, - `Subject:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Subject), "Subject", "v12.Subject", 1), `&`, ``, 1) + `,`, + `Subject:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Subject), "Subject", "v13.Subject", 1), `&`, ``, 1) + `,`, `Role:` + fmt.Sprintf("%v", 
this.Role) + `,`, `Roles:` + fmt.Sprintf("%v", this.Roles) + `,`, `}`, @@ -13236,12 +16062,13 @@ func (this *ProjectSpec) String() string { } repeatedStringForMembers += "}" s := strings.Join([]string{`&ProjectSpec{`, - `CreatedBy:` + strings.Replace(fmt.Sprintf("%v", this.CreatedBy), "Subject", "v12.Subject", 1) + `,`, + `CreatedBy:` + strings.Replace(fmt.Sprintf("%v", this.CreatedBy), "Subject", "v13.Subject", 1) + `,`, `Description:` + valueToStringGenerated(this.Description) + `,`, - `Owner:` + strings.Replace(fmt.Sprintf("%v", this.Owner), "Subject", "v12.Subject", 1) + `,`, + `Owner:` + strings.Replace(fmt.Sprintf("%v", this.Owner), "Subject", "v13.Subject", 1) + `,`, `Purpose:` + valueToStringGenerated(this.Purpose) + `,`, `Members:` + repeatedStringForMembers + `,`, `Namespace:` + valueToStringGenerated(this.Namespace) + `,`, + `Tolerations:` + strings.Replace(this.Tolerations.String(), "ProjectTolerations", "ProjectTolerations", 1) + `,`, `}`, }, "") return s @@ -13253,6 +16080,29 @@ func (this *ProjectStatus) String() string { s := strings.Join([]string{`&ProjectStatus{`, `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, + `StaleSinceTimestamp:` + strings.Replace(fmt.Sprintf("%v", this.StaleSinceTimestamp), "Time", "v11.Time", 1) + `,`, + `StaleAutoDeleteTimestamp:` + strings.Replace(fmt.Sprintf("%v", this.StaleAutoDeleteTimestamp), "Time", "v11.Time", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ProjectTolerations) String() string { + if this == nil { + return "nil" + } + repeatedStringForDefaults := "[]Toleration{" + for _, f := range this.Defaults { + repeatedStringForDefaults += strings.Replace(strings.Replace(f.String(), "Toleration", "Toleration", 1), `&`, ``, 1) + "," + } + repeatedStringForDefaults += "}" + repeatedStringForWhitelist := "[]Toleration{" + for _, f := range this.Whitelist { + repeatedStringForWhitelist += strings.Replace(strings.Replace(f.String(), "Toleration", "Toleration", 1), `&`, ``, 1) + "," + } + repeatedStringForWhitelist += "}" + s := strings.Join([]string{`&ProjectTolerations{`, + `Defaults:` + repeatedStringForDefaults + `,`, + `Whitelist:` + repeatedStringForWhitelist + `,`, `}`, }, "") return s @@ -13268,23 +16118,13 @@ func (this *Provider) String() string { repeatedStringForWorkers += "}" s := strings.Join([]string{`&Provider{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `ControlPlaneConfig:` + strings.Replace(this.ControlPlaneConfig.String(), "ProviderConfig", "ProviderConfig", 1) + `,`, - `InfrastructureConfig:` + strings.Replace(this.InfrastructureConfig.String(), "ProviderConfig", "ProviderConfig", 1) + `,`, + `ControlPlaneConfig:` + strings.Replace(fmt.Sprintf("%v", this.ControlPlaneConfig), "RawExtension", "runtime.RawExtension", 1) + `,`, + `InfrastructureConfig:` + strings.Replace(fmt.Sprintf("%v", this.InfrastructureConfig), "RawExtension", "runtime.RawExtension", 1) + `,`, `Workers:` + repeatedStringForWorkers + `,`, `}`, }, "") return s } -func (this *ProviderConfig) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ProviderConfig{`, - `RawExtension:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.RawExtension), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} func (this *Quota) String() string { if this == nil { return "nil" @@ -13343,9 +16183,32 @@ func (this *Region) String() string { repeatedStringForZones += strings.Replace(strings.Replace(f.String(), 
"AvailabilityZone", "AvailabilityZone", 1), `&`, ``, 1) + "," } repeatedStringForZones += "}" + keysForLabels := make([]string, 0, len(this.Labels)) + for k := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" s := strings.Join([]string{`&Region{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `Zones:` + repeatedStringForZones + `,`, + `Labels:` + mapStringForLabels + `,`, + `}`, + }, "") + return s +} +func (this *ResourceWatchCacheSize) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceWatchCacheSize{`, + `APIGroup:` + valueToStringGenerated(this.APIGroup) + `,`, + `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, + `CacheSize:` + fmt.Sprintf("%v", this.CacheSize) + `,`, `}`, }, "") return s @@ -13401,7 +16264,7 @@ func (this *SeedBackup) String() string { } s := strings.Join([]string{`&SeedBackup{`, `Provider:` + fmt.Sprintf("%v", this.Provider) + `,`, - `ProviderConfig:` + strings.Replace(this.ProviderConfig.String(), "ProviderConfig", "ProviderConfig", 1) + `,`, + `ProviderConfig:` + strings.Replace(fmt.Sprintf("%v", this.ProviderConfig), "RawExtension", "runtime.RawExtension", 1) + `,`, `Region:` + valueToStringGenerated(this.Region) + `,`, `SecretRef:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretReference", "v1.SecretReference", 1), `&`, ``, 1) + `,`, `}`, @@ -13413,7 +16276,21 @@ func (this *SeedDNS) String() string { return "nil" } s := strings.Join([]string{`&SeedDNS{`, - `IngressDomain:` + fmt.Sprintf("%v", this.IngressDomain) + `,`, + `IngressDomain:` + valueToStringGenerated(this.IngressDomain) + `,`, + `Provider:` + strings.Replace(this.Provider.String(), "SeedDNSProvider", "SeedDNSProvider", 1) + `,`, + `}`, + }, "") + return s +} +func (this *SeedDNSProvider) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SeedDNSProvider{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `SecretRef:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretReference", "v1.SecretReference", 1), `&`, ``, 1) + `,`, + `Domains:` + strings.Replace(this.Domains.String(), "DNSIncludeExclude", "DNSIncludeExclude", 1) + `,`, + `Zones:` + strings.Replace(this.Zones.String(), "DNSIncludeExclude", "DNSIncludeExclude", 1) + `,`, `}`, }, "") return s @@ -13454,12 +16331,97 @@ func (this *SeedProvider) String() string { } s := strings.Join([]string{`&SeedProvider{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `ProviderConfig:` + strings.Replace(this.ProviderConfig.String(), "ProviderConfig", "ProviderConfig", 1) + `,`, + `ProviderConfig:` + strings.Replace(fmt.Sprintf("%v", this.ProviderConfig), "RawExtension", "runtime.RawExtension", 1) + `,`, `Region:` + fmt.Sprintf("%v", this.Region) + `,`, `}`, }, "") return s } +func (this *SeedSelector) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SeedSelector{`, + `LabelSelector:` + strings.Replace(fmt.Sprintf("%v", this.LabelSelector), "LabelSelector", "v11.LabelSelector", 1) + `,`, + `ProviderTypes:` + fmt.Sprintf("%v", this.ProviderTypes) + `,`, + `}`, + }, "") + return s +} +func (this *SeedSettingExcessCapacityReservation) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&SeedSettingExcessCapacityReservation{`, + `Enabled:` + fmt.Sprintf("%v", this.Enabled) + `,`, + `}`, + }, "") + return s +} +func (this *SeedSettingLoadBalancerServices) String() string { + if this == nil { + return "nil" + } + keysForAnnotations := make([]string, 0, len(this.Annotations)) + for k := range this.Annotations { + keysForAnnotations = append(keysForAnnotations, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + mapStringForAnnotations := "map[string]string{" + for _, k := range keysForAnnotations { + mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k]) + } + mapStringForAnnotations += "}" + s := strings.Join([]string{`&SeedSettingLoadBalancerServices{`, + `Annotations:` + mapStringForAnnotations + `,`, + `}`, + }, "") + return s +} +func (this *SeedSettingScheduling) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SeedSettingScheduling{`, + `Visible:` + fmt.Sprintf("%v", this.Visible) + `,`, + `}`, + }, "") + return s +} +func (this *SeedSettingShootDNS) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SeedSettingShootDNS{`, + `Enabled:` + fmt.Sprintf("%v", this.Enabled) + `,`, + `}`, + }, "") + return s +} +func (this *SeedSettingVerticalPodAutoscaler) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SeedSettingVerticalPodAutoscaler{`, + `Enabled:` + fmt.Sprintf("%v", this.Enabled) + `,`, + `}`, + }, "") + return s +} +func (this *SeedSettings) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SeedSettings{`, + `ExcessCapacityReservation:` + strings.Replace(this.ExcessCapacityReservation.String(), "SeedSettingExcessCapacityReservation", "SeedSettingExcessCapacityReservation", 1) + `,`, + `Scheduling:` + strings.Replace(this.Scheduling.String(), "SeedSettingScheduling", "SeedSettingScheduling", 1) + `,`, + `ShootDNS:` + strings.Replace(this.ShootDNS.String(), "SeedSettingShootDNS", "SeedSettingShootDNS", 1) + `,`, + `LoadBalancerServices:` + strings.Replace(this.LoadBalancerServices.String(), "SeedSettingLoadBalancerServices", "SeedSettingLoadBalancerServices", 1) + `,`, + `VerticalPodAutoscaler:` + strings.Replace(this.VerticalPodAutoscaler.String(), "SeedSettingVerticalPodAutoscaler", "SeedSettingVerticalPodAutoscaler", 1) + `,`, + `}`, + }, "") + return s +} func (this *SeedSpec) String() string { if this == nil { return "nil" @@ -13477,6 +16439,8 @@ func (this *SeedSpec) String() string { `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretReference", "v1.SecretReference", 1) + `,`, `Taints:` + repeatedStringForTaints + `,`, `Volume:` + strings.Replace(this.Volume.String(), "SeedVolume", "SeedVolume", 1) + `,`, + `Settings:` + strings.Replace(this.Settings.String(), "SeedSettings", "SeedSettings", 1) + `,`, + `Ingress:` + strings.Replace(this.Ingress.String(), "Ingress", "Ingress", 1) + `,`, `}`, }, "") return s @@ -13490,11 +16454,34 @@ func (this *SeedStatus) String() string { repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "Condition", "Condition", 1), `&`, ``, 1) + "," } repeatedStringForConditions += "}" + keysForCapacity := make([]string, 0, len(this.Capacity)) + for k := range this.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + mapStringForCapacity := "k8s_io_api_core_v1.ResourceList{" + for _, k := range 
keysForCapacity { + mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[k8s_io_api_core_v1.ResourceName(k)]) + } + mapStringForCapacity += "}" + keysForAllocatable := make([]string, 0, len(this.Allocatable)) + for k := range this.Allocatable { + keysForAllocatable = append(keysForAllocatable, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAllocatable) + mapStringForAllocatable := "k8s_io_api_core_v1.ResourceList{" + for _, k := range keysForAllocatable { + mapStringForAllocatable += fmt.Sprintf("%v: %v,", k, this.Allocatable[k8s_io_api_core_v1.ResourceName(k)]) + } + mapStringForAllocatable += "}" s := strings.Join([]string{`&SeedStatus{`, `Gardener:` + strings.Replace(this.Gardener.String(), "Gardener", "Gardener", 1) + `,`, `KubernetesVersion:` + valueToStringGenerated(this.KubernetesVersion) + `,`, `Conditions:` + repeatedStringForConditions + `,`, `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `ClusterIdentity:` + valueToStringGenerated(this.ClusterIdentity) + `,`, + `Capacity:` + mapStringForCapacity + `,`, + `Allocatable:` + mapStringForAllocatable + `,`, `}`, }, "") return s @@ -13510,6 +16497,17 @@ func (this *SeedTaint) String() string { }, "") return s } +func (this *SeedTemplate) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SeedTemplate{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SeedSpec", "SeedSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} func (this *SeedVolume) String() string { if this == nil { return "nil" @@ -13582,7 +16580,7 @@ func (this *ShootMachineImage) String() string { } s := strings.Join([]string{`&ShootMachineImage{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `ProviderConfig:` + strings.Replace(this.ProviderConfig.String(), "ProviderConfig", "ProviderConfig", 1) + `,`, + `ProviderConfig:` + strings.Replace(fmt.Sprintf("%v", this.ProviderConfig), "RawExtension", "runtime.RawExtension", 1) + `,`, `Version:` + valueToStringGenerated(this.Version) + `,`, `}`, }, "") @@ -13608,6 +16606,16 @@ func (this *ShootSpec) String() string { repeatedStringForExtensions += strings.Replace(strings.Replace(f.String(), "Extension", "Extension", 1), `&`, ``, 1) + "," } repeatedStringForExtensions += "}" + repeatedStringForResources := "[]NamedResourceReference{" + for _, f := range this.Resources { + repeatedStringForResources += strings.Replace(strings.Replace(f.String(), "NamedResourceReference", "NamedResourceReference", 1), `&`, ``, 1) + "," + } + repeatedStringForResources += "}" + repeatedStringForTolerations := "[]Toleration{" + for _, f := range this.Tolerations { + repeatedStringForTolerations += strings.Replace(strings.Replace(f.String(), "Toleration", "Toleration", 1), `&`, ``, 1) + "," + } + repeatedStringForTolerations += "}" s := strings.Join([]string{`&ShootSpec{`, `Addons:` + strings.Replace(this.Addons.String(), "Addons", "Addons", 1) + `,`, `CloudProfileName:` + fmt.Sprintf("%v", this.CloudProfileName) + `,`, @@ -13623,6 +16631,9 @@ func (this *ShootSpec) String() string { `Region:` + fmt.Sprintf("%v", this.Region) + `,`, `SecretBindingName:` + fmt.Sprintf("%v", this.SecretBindingName) + `,`, `SeedName:` + valueToStringGenerated(this.SeedName) + `,`, + `SeedSelector:` + strings.Replace(this.SeedSelector.String(), "SeedSelector", "SeedSelector", 1) + `,`, + `Resources:` + 
repeatedStringForResources + `,`, + `Tolerations:` + repeatedStringForTolerations + `,`, `}`, }, "") return s @@ -13658,6 +16669,35 @@ func (this *ShootStatus) String() string { `SeedName:` + valueToStringGenerated(this.SeedName) + `,`, `TechnicalID:` + fmt.Sprintf("%v", this.TechnicalID) + `,`, `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `ClusterIdentity:` + valueToStringGenerated(this.ClusterIdentity) + `,`, + `}`, + }, "") + return s +} +func (this *Toleration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Toleration{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Value:` + valueToStringGenerated(this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *VerticalPodAutoscaler) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VerticalPodAutoscaler{`, + `Enabled:` + fmt.Sprintf("%v", this.Enabled) + `,`, + `EvictAfterOOMThreshold:` + strings.Replace(fmt.Sprintf("%v", this.EvictAfterOOMThreshold), "Duration", "v11.Duration", 1) + `,`, + `EvictionRateBurst:` + valueToStringGenerated(this.EvictionRateBurst) + `,`, + `EvictionRateLimit:` + valueToStringGenerated(this.EvictionRateLimit) + `,`, + `EvictionTolerance:` + valueToStringGenerated(this.EvictionTolerance) + `,`, + `RecommendationMarginFraction:` + valueToStringGenerated(this.RecommendationMarginFraction) + `,`, + `UpdaterInterval:` + strings.Replace(fmt.Sprintf("%v", this.UpdaterInterval), "Duration", "v11.Duration", 1) + `,`, + `RecommenderInterval:` + strings.Replace(fmt.Sprintf("%v", this.RecommenderInterval), "Duration", "v11.Duration", 1) + `,`, `}`, }, "") return s @@ -13687,6 +16727,22 @@ func (this *VolumeType) String() string { }, "") return s } +func (this *WatchCacheSizes) String() string { + if this == nil { + return "nil" + } + repeatedStringForResources := "[]ResourceWatchCacheSize{" + for _, f := range this.Resources { + repeatedStringForResources += strings.Replace(strings.Replace(f.String(), "ResourceWatchCacheSize", "ResourceWatchCacheSize", 1), `&`, ``, 1) + "," + } + repeatedStringForResources += "}" + s := strings.Join([]string{`&WatchCacheSizes{`, + `Default:` + valueToStringGenerated(this.Default) + `,`, + `Resources:` + repeatedStringForResources + `,`, + `}`, + }, "") + return s +} func (this *Worker) String() string { if this == nil { return "nil" @@ -13696,9 +16752,9 @@ func (this *Worker) String() string { repeatedStringForTaints += fmt.Sprintf("%v", f) + "," } repeatedStringForTaints += "}" - repeatedStringForDataVolumes := "[]Volume{" + repeatedStringForDataVolumes := "[]DataVolume{" for _, f := range this.DataVolumes { - repeatedStringForDataVolumes += strings.Replace(strings.Replace(f.String(), "Volume", "Volume", 1), `&`, ``, 1) + "," + repeatedStringForDataVolumes += strings.Replace(strings.Replace(f.String(), "DataVolume", "DataVolume", 1), `&`, ``, 1) + "," } repeatedStringForDataVolumes += "}" keysForAnnotations := make([]string, 0, len(this.Annotations)) @@ -13733,12 +16789,14 @@ func (this *Worker) String() string { `Minimum:` + fmt.Sprintf("%v", this.Minimum) + `,`, `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "intstr.IntOrString", 1) + `,`, `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, - `ProviderConfig:` + strings.Replace(this.ProviderConfig.String(), "ProviderConfig", "ProviderConfig", 1) + `,`, + `ProviderConfig:` + strings.Replace(fmt.Sprintf("%v", this.ProviderConfig), "RawExtension", 
"runtime.RawExtension", 1) + `,`, `Taints:` + repeatedStringForTaints + `,`, `Volume:` + strings.Replace(this.Volume.String(), "Volume", "Volume", 1) + `,`, `DataVolumes:` + repeatedStringForDataVolumes + `,`, `KubeletDataVolumeName:` + valueToStringGenerated(this.KubeletDataVolumeName) + `,`, `Zones:` + fmt.Sprintf("%v", this.Zones) + `,`, + `SystemComponents:` + strings.Replace(this.SystemComponents.String(), "WorkerSystemComponents", "WorkerSystemComponents", 1) + `,`, + `MachineControllerManagerSettings:` + strings.Replace(this.MachineControllerManagerSettings.String(), "MachineControllerManagerSettings", "MachineControllerManagerSettings", 1) + `,`, `}`, }, "") return s @@ -13753,6 +16811,16 @@ func (this *WorkerKubernetes) String() string { }, "") return s } +func (this *WorkerSystemComponents) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WorkerSystemComponents{`, + `Allow:` + fmt.Sprintf("%v", this.Allow) + `,`, + `}`, + }, "") + return s +} func valueToStringGenerated(v interface{}) string { rv := reflect.ValueOf(v) if rv.IsNil() { @@ -14050,7 +17118,7 @@ func (m *AdmissionPlugin) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Config == nil { - m.Config = &ProviderConfig{} + m.Config = &runtime.RawExtension{} } if err := m.Config.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -14973,7 +18041,7 @@ func (m *BackupBucketSpec) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.ProviderConfig == nil { - m.ProviderConfig = &ProviderConfig{} + m.ProviderConfig = &runtime.RawExtension{} } if err := m.ProviderConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -15128,7 +18196,7 @@ func (m *BackupBucketStatus) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.ProviderStatus == nil { - m.ProviderStatus = &ProviderConfig{} + m.ProviderStatus = &runtime.RawExtension{} } if err := m.ProviderStatus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -16487,7 +19555,7 @@ func (m *CloudProfileSpec) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.ProviderConfig == nil { - m.ProviderConfig = &ProviderConfig{} + m.ProviderConfig = &runtime.RawExtension{} } if err := m.ProviderConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -16557,7 +19625,7 @@ func (m *CloudProfileSpec) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.SeedSelector == nil { - m.SeedSelector = &v11.LabelSelector{} + m.SeedSelector = &SeedSelector{} } if err := m.SeedSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -17240,6 +20308,38 @@ func (m *Condition) Unmarshal(dAtA []byte) error { } m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Codes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Codes = append(m.Codes, ErrorCode(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -17355,7 +20455,7 @@ func 
(m *ContainerRuntime) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.ProviderConfig == nil { - m.ProviderConfig = &ProviderConfig{} + m.ProviderConfig = &runtime.RawExtension{} } if err := m.ProviderConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -17476,12 +20576,81 @@ func (m *ControllerDeployment) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.ProviderConfig == nil { - m.ProviderConfig = &ProviderConfig{} + m.ProviderConfig = &runtime.RawExtension{} } if err := m.ProviderConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Policy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := ControllerDeploymentPolicy(dAtA[iNdEx:postIndex]) + m.Policy = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SeedSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SeedSelector == nil { + m.SeedSelector = &v11.LabelSelector{} + } + if err := m.SeedSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -17990,7 +21159,7 @@ func (m *ControllerInstallationStatus) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.ProviderStatus == nil { - m.ProviderStatus = &ProviderConfig{} + m.ProviderStatus = &runtime.RawExtension{} } if err := m.ProviderStatus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -18532,97 +21701,11 @@ func (m *ControllerResource) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DNS) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DNS: wiretype end group for non-group") - } - if fieldNum <= 0 { - return 
fmt.Errorf("proto: DNS: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Domain", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Domain = &s - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Providers", wireType) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Primary", wireType) } - var msglen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -18632,26 +21715,13 @@ func (m *DNS) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Providers = append(m.Providers, DNSProvider{}) - if err := m.Providers[len(m.Providers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex + b := bool(v != 0) + m.Primary = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -18676,7 +21746,7 @@ func (m *DNS) Unmarshal(dAtA []byte) error { } return nil } -func (m *DNSIncludeExclude) Unmarshal(dAtA []byte) error { +func (m *DNS) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -18699,15 +21769,15 @@ func (m *DNSIncludeExclude) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DNSIncludeExclude: wiretype end group for non-group") + return fmt.Errorf("proto: DNS: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DNSIncludeExclude: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DNS: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Include", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Domain", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -18735,13 +21805,14 @@ func (m *DNSIncludeExclude) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Include = append(m.Include, string(dAtA[iNdEx:postIndex])) + s := string(dAtA[iNdEx:postIndex]) + m.Domain = &s iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Exclude", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Providers", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -18751,23 +21822,25 @@ func (m *DNSIncludeExclude) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= 
int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Exclude = append(m.Exclude, string(dAtA[iNdEx:postIndex])) + m.Providers = append(m.Providers, DNSProvider{}) + if err := m.Providers[len(m.Providers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -18793,7 +21866,7 @@ func (m *DNSIncludeExclude) Unmarshal(dAtA []byte) error { } return nil } -func (m *DNSProvider) Unmarshal(dAtA []byte) error { +func (m *DNSIncludeExclude) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -18816,72 +21889,15 @@ func (m *DNSProvider) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DNSProvider: wiretype end group for non-group") + return fmt.Errorf("proto: DNSIncludeExclude: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DNSProvider: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DNSIncludeExclude: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Domains", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Domains == nil { - m.Domains = &DNSIncludeExclude{} - } - if err := m.Domains.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Primary", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.Primary = &b - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Include", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -18909,12 +21925,11 @@ func (m *DNSProvider) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.SecretName = &s + m.Include = append(m.Include, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 4: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Exclude", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -18942,44 +21957,7 @@ func (m *DNSProvider) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.Type = &s - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = 
%d for field Zones", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Zones == nil { - m.Zones = &DNSIncludeExclude{} - } - if err := m.Zones.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Exclude = append(m.Exclude, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex @@ -19005,7 +21983,7 @@ func (m *DNSProvider) Unmarshal(dAtA []byte) error { } return nil } -func (m *Endpoint) Unmarshal(dAtA []byte) error { +func (m *DNSProvider) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -19028,17 +22006,17 @@ func (m *Endpoint) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Endpoint: wiretype end group for non-group") + return fmt.Errorf("proto: DNSProvider: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Endpoint: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DNSProvider: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Domains", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19048,27 +22026,52 @@ func (m *Endpoint) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + if m.Domains == nil { + m.Domains = &DNSIncludeExclude{} + } + if err := m.Domains.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Primary", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Primary = &b + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -19096,11 +22099,12 @@ func (m *Endpoint) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.URL = string(dAtA[iNdEx:postIndex]) + s := string(dAtA[iNdEx:postIndex]) + m.SecretName = &s iNdEx = postIndex - case 3: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Purpose", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", 
wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -19128,7 +22132,44 @@ func (m *Endpoint) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Purpose = string(dAtA[iNdEx:postIndex]) + s := string(dAtA[iNdEx:postIndex]) + m.Type = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Zones", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Zones == nil { + m.Zones = &DNSIncludeExclude{} + } + if err := m.Zones.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -19154,7 +22195,7 @@ func (m *Endpoint) Unmarshal(dAtA []byte) error { } return nil } -func (m *ExpirableVersion) Unmarshal(dAtA []byte) error { +func (m *DataVolume) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -19177,15 +22218,15 @@ func (m *ExpirableVersion) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ExpirableVersion: wiretype end group for non-group") + return fmt.Errorf("proto: DataVolume: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ExpirableVersion: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DataVolume: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -19213,13 +22254,13 @@ func (m *ExpirableVersion) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Version = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExpirationDate", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19229,31 +22270,28 @@ func (m *ExpirableVersion) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.ExpirationDate == nil { - m.ExpirationDate = &v11.Time{} - } - if err := m.ExpirationDate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := string(dAtA[iNdEx:postIndex]) + m.Type = &s iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Classification", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field VolumeSize", wireType) } var 
stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -19281,9 +22319,29 @@ func (m *ExpirableVersion) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := VersionClassification(dAtA[iNdEx:postIndex]) - m.Classification = &s + m.VolumeSize = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Encrypted", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Encrypted = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -19308,7 +22366,7 @@ func (m *ExpirableVersion) Unmarshal(dAtA []byte) error { } return nil } -func (m *Extension) Unmarshal(dAtA []byte) error { +func (m *Endpoint) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -19331,15 +22389,15 @@ func (m *Extension) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Extension: wiretype end group for non-group") + return fmt.Errorf("proto: Endpoint: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Extension: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Endpoint: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -19367,13 +22425,13 @@ func (m *Extension) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ProviderConfig", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19383,27 +22441,55 @@ func (m *Extension) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.ProviderConfig == nil { - m.ProviderConfig = &ProviderConfig{} + m.URL = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Purpose", wireType) } - if err := m.ProviderConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 
{ + return ErrInvalidLengthGenerated } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Purpose = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -19429,7 +22515,7 @@ func (m *Extension) Unmarshal(dAtA []byte) error { } return nil } -func (m *Gardener) Unmarshal(dAtA []byte) error { +func (m *ExpirableVersion) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -19452,15 +22538,15 @@ func (m *Gardener) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Gardener: wiretype end group for non-group") + return fmt.Errorf("proto: ExpirableVersion: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Gardener: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ExpirableVersion: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -19488,13 +22574,13 @@ func (m *Gardener) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ID = string(dAtA[iNdEx:postIndex]) + m.Version = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ExpirationDate", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19504,27 +22590,31 @@ func (m *Gardener) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + if m.ExpirationDate == nil { + m.ExpirationDate = &v11.Time{} + } + if err := m.ExpirationDate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Classification", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -19552,7 +22642,8 @@ func (m *Gardener) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Version = string(dAtA[iNdEx:postIndex]) + s := VersionClassification(dAtA[iNdEx:postIndex]) + m.Classification = &s iNdEx = postIndex default: iNdEx = preIndex @@ -19578,7 +22669,7 @@ func (m *Gardener) Unmarshal(dAtA []byte) error { } return nil } -func (m *Hibernation) Unmarshal(dAtA []byte) error { +func (m *Extension) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -19601,17 +22692,17 @@ func (m *Hibernation) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Hibernation: wiretype end group for non-group") + return fmt.Errorf("proto: Extension: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: 
Hibernation: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Extension: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Enabled", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19621,16 +22712,27 @@ func (m *Hibernation) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - b := bool(v != 0) - m.Enabled = &b + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Schedules", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ProviderConfig", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -19657,11 +22759,34 @@ func (m *Hibernation) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Schedules = append(m.Schedules, HibernationSchedule{}) - if err := m.Schedules[len(m.Schedules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.ProviderConfig == nil { + m.ProviderConfig = &runtime.RawExtension{} + } + if err := m.ProviderConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Disabled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Disabled = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -19686,7 +22811,7 @@ func (m *Hibernation) Unmarshal(dAtA []byte) error { } return nil } -func (m *HibernationSchedule) Unmarshal(dAtA []byte) error { +func (m *Gardener) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -19709,15 +22834,15 @@ func (m *HibernationSchedule) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: HibernationSchedule: wiretype end group for non-group") + return fmt.Errorf("proto: Gardener: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: HibernationSchedule: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Gardener: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -19745,12 +22870,11 @@ func (m *HibernationSchedule) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.Start = &s + m.ID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = 
%d for field End", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -19778,12 +22902,11 @@ func (m *HibernationSchedule) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.End = &s + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Location", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -19811,8 +22934,7 @@ func (m *HibernationSchedule) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.Location = &s + m.Version = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -19838,7 +22960,7 @@ func (m *HibernationSchedule) Unmarshal(dAtA []byte) error { } return nil } -func (m *HorizontalPodAutoscalerConfig) Unmarshal(dAtA []byte) error { +func (m *Hibernation) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -19861,17 +22983,17 @@ func (m *HorizontalPodAutoscalerConfig) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: HorizontalPodAutoscalerConfig: wiretype end group for non-group") + return fmt.Errorf("proto: Hibernation: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: HorizontalPodAutoscalerConfig: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Hibernation: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CPUInitializationPeriod", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Enabled", wireType) } - var msglen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19881,103 +23003,16 @@ func (m *HorizontalPodAutoscalerConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.CPUInitializationPeriod == nil { - m.CPUInitializationPeriod = &v11.Duration{} - } - if err := m.CPUInitializationPeriod.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex + b := bool(v != 0) + m.Enabled = &b case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DownscaleDelay", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.DownscaleDelay == nil { - m.DownscaleDelay = &v11.Duration{} - } - if err := m.DownscaleDelay.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if 
wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DownscaleStabilization", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.DownscaleStabilization == nil { - m.DownscaleStabilization = &v11.Duration{} - } - if err := m.DownscaleStabilization.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field InitialReadinessDelay", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Schedules", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -20004,18 +23039,69 @@ func (m *HorizontalPodAutoscalerConfig) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.InitialReadinessDelay == nil { - m.InitialReadinessDelay = &v11.Duration{} - } - if err := m.InitialReadinessDelay.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Schedules = append(m.Schedules, HibernationSchedule{}) + if err := m.Schedules[len(m.Schedules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 5: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HibernationSchedule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HibernationSchedule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HibernationSchedule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SyncPeriod", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -20025,45 +23111,63 @@ func (m *HorizontalPodAutoscalerConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.SyncPeriod == nil { - m.SyncPeriod = &v11.Duration{} + s := string(dAtA[iNdEx:postIndex]) + m.Start = &s 
+ iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field End", wireType) } - if err := m.SyncPeriod.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - iNdEx = postIndex - case 6: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field Tolerance", wireType) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated } - var v uint64 - if (iNdEx + 8) > l { + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { return io.ErrUnexpectedEOF } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - v2 := float64(math.Float64frombits(v)) - m.Tolerance = &v2 - case 7: + s := string(dAtA[iNdEx:postIndex]) + m.End = &s + iNdEx = postIndex + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UpscaleDelay", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Location", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -20073,27 +23177,24 @@ func (m *HorizontalPodAutoscalerConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.UpscaleDelay == nil { - m.UpscaleDelay = &v11.Duration{} - } - if err := m.UpscaleDelay.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := string(dAtA[iNdEx:postIndex]) + m.Location = &s iNdEx = postIndex default: iNdEx = preIndex @@ -20119,7 +23220,7 @@ func (m *HorizontalPodAutoscalerConfig) Unmarshal(dAtA []byte) error { } return nil } -func (m *KubeAPIServerConfig) Unmarshal(dAtA []byte) error { +func (m *HorizontalPodAutoscalerConfig) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -20142,15 +23243,15 @@ func (m *KubeAPIServerConfig) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: KubeAPIServerConfig: wiretype end group for non-group") + return fmt.Errorf("proto: HorizontalPodAutoscalerConfig: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: KubeAPIServerConfig: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: HorizontalPodAutoscalerConfig: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field KubernetesConfig", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CPUInitializationPeriod", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -20177,13 +23278,16 @@ func (m *KubeAPIServerConfig) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.KubernetesConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if 
m.CPUInitializationPeriod == nil { + m.CPUInitializationPeriod = &v11.Duration{} + } + if err := m.CPUInitializationPeriod.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AdmissionPlugins", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DownscaleDelay", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -20210,46 +23314,16 @@ func (m *KubeAPIServerConfig) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.AdmissionPlugins = append(m.AdmissionPlugins, AdmissionPlugin{}) - if err := m.AdmissionPlugins[len(m.AdmissionPlugins)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.DownscaleDelay == nil { + m.DownscaleDelay = &v11.Duration{} + } + if err := m.DownscaleDelay.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIAudiences", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.APIAudiences = append(m.APIAudiences, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AuditConfig", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DownscaleStabilization", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -20276,37 +23350,16 @@ func (m *KubeAPIServerConfig) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.AuditConfig == nil { - m.AuditConfig = &AuditConfig{} + if m.DownscaleStabilization == nil { + m.DownscaleStabilization = &v11.Duration{} } - if err := m.AuditConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.DownscaleStabilization.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field EnableBasicAuthentication", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.EnableBasicAuthentication = &b - case 6: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field OIDCConfig", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field InitialReadinessDelay", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -20333,16 +23386,16 @@ func (m *KubeAPIServerConfig) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.OIDCConfig == nil { - m.OIDCConfig = &OIDCConfig{} + if m.InitialReadinessDelay == nil { + m.InitialReadinessDelay = &v11.Duration{} } - if err := m.OIDCConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.InitialReadinessDelay.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = 
postIndex - case 7: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RuntimeConfig", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SyncPeriod", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -20369,95 +23422,28 @@ func (m *KubeAPIServerConfig) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.RuntimeConfig == nil { - m.RuntimeConfig = make(map[string]bool) + if m.SyncPeriod == nil { + m.SyncPeriod = &v11.Duration{} } - var mapkey string - var mapvalue bool - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapvaluetemp int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapvaluetemp |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - mapvalue = bool(mapvaluetemp != 0) - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } + if err := m.SyncPeriod.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.RuntimeConfig[mapkey] = mapvalue iNdEx = postIndex - case 8: + case 6: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Tolerance", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.Tolerance = &v2 + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccountConfig", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field UpscaleDelay", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -20484,10 +23470,10 @@ func (m *KubeAPIServerConfig) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ServiceAccountConfig == nil { - m.ServiceAccountConfig = &ServiceAccountConfig{} + if m.UpscaleDelay == nil { + m.UpscaleDelay = &v11.Duration{} } - if err := m.ServiceAccountConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.UpscaleDelay.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -20515,7 +23501,7 @@ func (m *KubeAPIServerConfig) Unmarshal(dAtA []byte) error { } return nil } -func (m 
*KubeControllerManagerConfig) Unmarshal(dAtA []byte) error { +func (m *Ingress) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -20538,17 +23524,17 @@ func (m *KubeControllerManagerConfig) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: KubeControllerManagerConfig: wiretype end group for non-group") + return fmt.Errorf("proto: Ingress: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: KubeControllerManagerConfig: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Ingress: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field KubernetesConfig", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Domain", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -20558,28 +23544,27 @@ func (m *KubeControllerManagerConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.KubernetesConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Domain = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HorizontalPodAutoscalerConfig", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Controller", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -20606,33 +23591,10 @@ func (m *KubeControllerManagerConfig) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.HorizontalPodAutoscalerConfig == nil { - m.HorizontalPodAutoscalerConfig = &HorizontalPodAutoscalerConfig{} - } - if err := m.HorizontalPodAutoscalerConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Controller.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeCIDRMaskSize", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.NodeCIDRMaskSize = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -20657,7 +23619,7 @@ func (m *KubeControllerManagerConfig) Unmarshal(dAtA []byte) error { } return nil } -func (m *KubeProxyConfig) Unmarshal(dAtA []byte) error { +func (m *IngressController) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -20680,17 +23642,17 @@ func (m *KubeProxyConfig) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: KubeProxyConfig: wiretype end group for non-group") + return fmt.Errorf("proto: IngressController: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: KubeProxyConfig: illegal tag %d (wire 
type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressController: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field KubernetesConfig", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -20700,30 +23662,29 @@ func (m *KubeProxyConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.KubernetesConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Kind = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ProviderConfig", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -20733,24 +23694,27 @@ func (m *KubeProxyConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := ProxyMode(dAtA[iNdEx:postIndex]) - m.Mode = &s + if m.ProviderConfig == nil { + m.ProviderConfig = &runtime.RawExtension{} + } + if err := m.ProviderConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -20776,7 +23740,7 @@ func (m *KubeProxyConfig) Unmarshal(dAtA []byte) error { } return nil } -func (m *KubeSchedulerConfig) Unmarshal(dAtA []byte) error { +func (m *KubeAPIServerConfig) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -20799,10 +23763,10 @@ func (m *KubeSchedulerConfig) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: KubeSchedulerConfig: wiretype end group for non-group") + return fmt.Errorf("proto: KubeAPIServerConfig: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: KubeSchedulerConfig: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: KubeAPIServerConfig: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -20838,62 +23802,9 @@ func (m *KubeSchedulerConfig) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *KubeletConfig) Unmarshal(dAtA []byte) 
error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: KubeletConfig: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: KubeletConfig: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field KubernetesConfig", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AdmissionPlugins", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -20920,34 +23831,14 @@ func (m *KubeletConfig) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.KubernetesConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.AdmissionPlugins = append(m.AdmissionPlugins, AdmissionPlugin{}) + if err := m.AdmissionPlugins[len(m.AdmissionPlugins)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CPUCFSQuota", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.CPUCFSQuota = &b case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CPUManagerPolicy", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field APIAudiences", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -20975,12 +23866,11 @@ func (m *KubeletConfig) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.CPUManagerPolicy = &s + m.APIAudiences = append(m.APIAudiences, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EvictionHard", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AuditConfig", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -21007,18 +23897,18 @@ func (m *KubeletConfig) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.EvictionHard == nil { - m.EvictionHard = &KubeletConfigEviction{} + if m.AuditConfig == nil { + m.AuditConfig = &AuditConfig{} } - if err := m.EvictionHard.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.AuditConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 5: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field EvictionMaxPodGracePeriod", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field EnableBasicAuthentication", wireType) } - var v int32 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -21028,15 +23918,16 @@ func (m *KubeletConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - m.EvictionMaxPodGracePeriod = &v + b := bool(v != 0) + m.EnableBasicAuthentication 
= &b case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EvictionMinimumReclaim", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field OIDCConfig", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -21063,16 +23954,16 @@ func (m *KubeletConfig) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.EvictionMinimumReclaim == nil { - m.EvictionMinimumReclaim = &KubeletConfigEvictionMinimumReclaim{} + if m.OIDCConfig == nil { + m.OIDCConfig = &OIDCConfig{} } - if err := m.EvictionMinimumReclaim.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.OIDCConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EvictionPressureTransitionPeriod", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RuntimeConfig", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -21099,16 +23990,95 @@ func (m *KubeletConfig) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.EvictionPressureTransitionPeriod == nil { - m.EvictionPressureTransitionPeriod = &v11.Duration{} + if m.RuntimeConfig == nil { + m.RuntimeConfig = make(map[string]bool) } - if err := m.EvictionPressureTransitionPeriod.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + var mapkey string + var mapvalue bool + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapvaluetemp int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapvaluetemp |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + mapvalue = bool(mapvaluetemp != 0) + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } + m.RuntimeConfig[mapkey] = mapvalue iNdEx = postIndex case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EvictionSoft", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccountConfig", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -21135,16 +24105,16 @@ func (m *KubeletConfig) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if 
m.EvictionSoft == nil { - m.EvictionSoft = &KubeletConfigEviction{} + if m.ServiceAccountConfig == nil { + m.ServiceAccountConfig = &ServiceAccountConfig{} } - if err := m.EvictionSoft.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ServiceAccountConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 9: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EvictionSoftGracePeriod", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field WatchCacheSizes", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -21171,56 +24141,16 @@ func (m *KubeletConfig) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.EvictionSoftGracePeriod == nil { - m.EvictionSoftGracePeriod = &KubeletConfigEvictionSoftGracePeriod{} + if m.WatchCacheSizes == nil { + m.WatchCacheSizes = &WatchCacheSizes{} } - if err := m.EvictionSoftGracePeriod.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.WatchCacheSizes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 10: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxPods", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.MaxPods = &v - case 11: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PodPIDsLimit", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.PodPIDsLimit = &v - case 12: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ImagePullProgressDeadline", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -21247,10 +24177,10 @@ func (m *KubeletConfig) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ImagePullProgressDeadline == nil { - m.ImagePullProgressDeadline = &v11.Duration{} + if m.Requests == nil { + m.Requests = &KubeAPIServerRequests{} } - if err := m.ImagePullProgressDeadline.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Requests.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -21278,7 +24208,7 @@ func (m *KubeletConfig) Unmarshal(dAtA []byte) error { } return nil } -func (m *KubeletConfigEviction) Unmarshal(dAtA []byte) error { +func (m *KubeAPIServerRequests) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -21301,17 +24231,17 @@ func (m *KubeletConfigEviction) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: KubeletConfigEviction: wiretype end group for non-group") + return fmt.Errorf("proto: KubeAPIServerRequests: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: KubeletConfigEviction: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: KubeAPIServerRequests: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field 
MemoryAvailable", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxNonMutatingInflight", wireType) } - var stringLen uint64 + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -21321,30 +24251,17 @@ func (m *KubeletConfigEviction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.MemoryAvailable = &s - iNdEx = postIndex + m.MaxNonMutatingInflight = &v case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ImageFSAvailable", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxMutatingInflight", wireType) } - var stringLen uint64 + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -21354,30 +24271,70 @@ func (m *KubeletConfigEviction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + m.MaxMutatingInflight = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen - if postIndex < 0 { + if (iNdEx + skippy) < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.ImageFSAvailable = &s - iNdEx = postIndex - case 3: + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *KubeControllerManagerConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KubeControllerManagerConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KubeControllerManagerConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ImageFSInodesFree", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field KubernetesConfig", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -21387,30 +24344,30 @@ func (m *KubeletConfigEviction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return 
io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.ImageFSInodesFree = &s + if err := m.KubernetesConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 4: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeFSAvailable", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field HorizontalPodAutoscalerConfig", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -21420,30 +24377,53 @@ func (m *KubeletConfigEviction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.NodeFSAvailable = &s + if m.HorizontalPodAutoscalerConfig == nil { + m.HorizontalPodAutoscalerConfig = &HorizontalPodAutoscalerConfig{} + } + if err := m.HorizontalPodAutoscalerConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 5: + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeCIDRMaskSize", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.NodeCIDRMaskSize = &v + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeFSInodesFree", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PodEvictionTimeout", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -21453,24 +24433,27 @@ func (m *KubeletConfigEviction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.NodeFSInodesFree = &s + if m.PodEvictionTimeout == nil { + m.PodEvictionTimeout = &v11.Duration{} + } + if err := m.PodEvictionTimeout.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -21496,7 +24479,7 @@ func (m *KubeletConfigEviction) Unmarshal(dAtA []byte) error { } return nil } -func (m *KubeletConfigEvictionMinimumReclaim) Unmarshal(dAtA []byte) error { +func (m *KubeProxyConfig) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -21519,15 +24502,15 @@ func (m *KubeletConfigEvictionMinimumReclaim) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: KubeletConfigEvictionMinimumReclaim: wiretype end group for non-group") + return fmt.Errorf("proto: KubeProxyConfig: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: 
KubeletConfigEvictionMinimumReclaim: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: KubeProxyConfig: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MemoryAvailable", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field KubernetesConfig", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -21554,18 +24537,15 @@ func (m *KubeletConfigEvictionMinimumReclaim) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.MemoryAvailable == nil { - m.MemoryAvailable = &resource.Quantity{} - } - if err := m.MemoryAvailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.KubernetesConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ImageFSAvailable", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -21575,67 +24555,81 @@ func (m *KubeletConfigEvictionMinimumReclaim) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.ImageFSAvailable == nil { - m.ImageFSAvailable = &resource.Quantity{} - } - if err := m.ImageFSAvailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := ProxyMode(dAtA[iNdEx:postIndex]) + m.Mode = &s iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ImageFSInodesFree", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - if msglen < 0 { + if skippy < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen - if postIndex < 0 { + if (iNdEx + skippy) < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - if m.ImageFSInodesFree == nil { - m.ImageFSInodesFree = &resource.Quantity{} + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *KubeSchedulerConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if err := m.ImageFSInodesFree.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + if iNdEx >= l { + return io.ErrUnexpectedEOF } - iNdEx = postIndex - case 4: + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KubeSchedulerConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { 
+ return fmt.Errorf("proto: KubeSchedulerConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeFSAvailable", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field KubernetesConfig", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -21662,18 +24656,15 @@ func (m *KubeletConfigEvictionMinimumReclaim) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.NodeFSAvailable == nil { - m.NodeFSAvailable = &resource.Quantity{} - } - if err := m.NodeFSAvailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.KubernetesConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 5: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeFSInodesFree", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field KubeMaxPDVols", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -21683,27 +24674,24 @@ func (m *KubeletConfigEvictionMinimumReclaim) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.NodeFSInodesFree == nil { - m.NodeFSInodesFree = &resource.Quantity{} - } - if err := m.NodeFSInodesFree.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := string(dAtA[iNdEx:postIndex]) + m.KubeMaxPDVols = &s iNdEx = postIndex default: iNdEx = preIndex @@ -21729,7 +24717,7 @@ func (m *KubeletConfigEvictionMinimumReclaim) Unmarshal(dAtA []byte) error { } return nil } -func (m *KubeletConfigEvictionSoftGracePeriod) Unmarshal(dAtA []byte) error { +func (m *KubeletConfig) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -21752,15 +24740,15 @@ func (m *KubeletConfigEvictionSoftGracePeriod) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: KubeletConfigEvictionSoftGracePeriod: wiretype end group for non-group") + return fmt.Errorf("proto: KubeletConfig: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: KubeletConfigEvictionSoftGracePeriod: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: KubeletConfig: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MemoryAvailable", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field KubernetesConfig", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -21787,18 +24775,15 @@ func (m *KubeletConfigEvictionSoftGracePeriod) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.MemoryAvailable == nil { - m.MemoryAvailable = &v11.Duration{} - } - if err := m.MemoryAvailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.KubernetesConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d 
for field ImageFSAvailable", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CPUCFSQuota", wireType) } - var msglen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -21808,33 +24793,18 @@ func (m *KubeletConfigEvictionSoftGracePeriod) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ImageFSAvailable == nil { - m.ImageFSAvailable = &v11.Duration{} - } - if err := m.ImageFSAvailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex + b := bool(v != 0) + m.CPUCFSQuota = &b case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ImageFSInodesFree", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CPUManagerPolicy", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -21844,31 +24814,28 @@ func (m *KubeletConfigEvictionSoftGracePeriod) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.ImageFSInodesFree == nil { - m.ImageFSInodesFree = &v11.Duration{} - } - if err := m.ImageFSInodesFree.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := string(dAtA[iNdEx:postIndex]) + m.CPUManagerPolicy = &s iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeFSAvailable", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field EvictionHard", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -21895,16 +24862,36 @@ func (m *KubeletConfigEvictionSoftGracePeriod) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.NodeFSAvailable == nil { - m.NodeFSAvailable = &v11.Duration{} + if m.EvictionHard == nil { + m.EvictionHard = &KubeletConfigEviction{} } - if err := m.NodeFSAvailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.EvictionHard.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EvictionMaxPodGracePeriod", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.EvictionMaxPodGracePeriod = &v + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeFSInodesFree", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field EvictionMinimumReclaim", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -21931,90 +24918,16 @@ func (m *KubeletConfigEvictionSoftGracePeriod) Unmarshal(dAtA []byte) error { if 
postIndex > l { return io.ErrUnexpectedEOF } - if m.NodeFSInodesFree == nil { - m.NodeFSInodesFree = &v11.Duration{} + if m.EvictionMinimumReclaim == nil { + m.EvictionMinimumReclaim = &KubeletConfigEvictionMinimumReclaim{} } - if err := m.NodeFSInodesFree.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.EvictionMinimumReclaim.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Kubernetes) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Kubernetes: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Kubernetes: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowPrivilegedContainers", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.AllowPrivilegedContainers = &b - case 2: + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClusterAutoscaler", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field EvictionPressureTransitionPeriod", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -22041,16 +24954,16 @@ func (m *Kubernetes) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ClusterAutoscaler == nil { - m.ClusterAutoscaler = &ClusterAutoscaler{} + if m.EvictionPressureTransitionPeriod == nil { + m.EvictionPressureTransitionPeriod = &v11.Duration{} } - if err := m.ClusterAutoscaler.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.EvictionPressureTransitionPeriod.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field KubeAPIServer", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field EvictionSoft", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -22077,16 +24990,16 @@ func (m *Kubernetes) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.KubeAPIServer == nil { - m.KubeAPIServer = &KubeAPIServerConfig{} + if m.EvictionSoft == nil { + m.EvictionSoft = &KubeletConfigEviction{} } - if err := m.KubeAPIServer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.EvictionSoft.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: + case 9: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field KubeControllerManager", 
wireType) + return fmt.Errorf("proto: wrong wireType = %d for field EvictionSoftGracePeriod", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -22113,18 +25026,18 @@ func (m *Kubernetes) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.KubeControllerManager == nil { - m.KubeControllerManager = &KubeControllerManagerConfig{} + if m.EvictionSoftGracePeriod == nil { + m.EvictionSoftGracePeriod = &KubeletConfigEvictionSoftGracePeriod{} } - if err := m.KubeControllerManager.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.EvictionSoftGracePeriod.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field KubeScheduler", wireType) + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxPods", wireType) } - var msglen int + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -22134,31 +25047,35 @@ func (m *Kubernetes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.KubeScheduler == nil { - m.KubeScheduler = &KubeSchedulerConfig{} + m.MaxPods = &v + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PodPIDsLimit", wireType) } - if err := m.KubeScheduler.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } } - iNdEx = postIndex - case 6: + m.PodPIDsLimit = &v + case 12: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field KubeProxy", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ImagePullProgressDeadline", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -22185,16 +25102,37 @@ func (m *Kubernetes) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.KubeProxy == nil { - m.KubeProxy = &KubeProxyConfig{} + if m.ImagePullProgressDeadline == nil { + m.ImagePullProgressDeadline = &v11.Duration{} } - if err := m.KubeProxy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ImagePullProgressDeadline.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 7: + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FailSwapOn", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.FailSwapOn = &b + case 14: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kubelet", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field KubeReserved", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -22221,18 +25159,18 @@ func (m *Kubernetes) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Kubelet == 
nil { - m.Kubelet = &KubeletConfig{} + if m.KubeReserved == nil { + m.KubeReserved = &KubeletConfigReserved{} } - if err := m.Kubelet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.KubeReserved.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 8: + case 15: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SystemReserved", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -22242,23 +25180,27 @@ func (m *Kubernetes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Version = string(dAtA[iNdEx:postIndex]) + if m.SystemReserved == nil { + m.SystemReserved = &KubeletConfigReserved{} + } + if err := m.SystemReserved.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -22284,7 +25226,7 @@ func (m *Kubernetes) Unmarshal(dAtA []byte) error { } return nil } -func (m *KubernetesConfig) Unmarshal(dAtA []byte) error { +func (m *KubeletConfigEviction) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -22307,17 +25249,17 @@ func (m *KubernetesConfig) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: KubernetesConfig: wiretype end group for non-group") + return fmt.Errorf("proto: KubeletConfigEviction: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: KubernetesConfig: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: KubeletConfigEviction: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FeatureGates", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MemoryAvailable", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -22327,163 +25269,61 @@ func (m *KubernetesConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.FeatureGates == nil { - m.FeatureGates = make(map[string]bool) + s := string(dAtA[iNdEx:postIndex]) + m.MemoryAvailable = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImageFSAvailable", wireType) } - var mapkey string - var mapvalue bool - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= 
uint64(b&0x7F) << shift - if b < 0x80 { - break - } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapvaluetemp int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapvaluetemp |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - mapvalue = bool(mapvaluetemp != 0) - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break } } - m.FeatureGates[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *KubernetesDashboard) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: KubernetesDashboard: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: KubernetesDashboard: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + s := string(dAtA[iNdEx:postIndex]) + m.ImageFSAvailable = &s + iNdEx = postIndex + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AuthenticationMode", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ImageFSInodesFree", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -22512,13 +25352,13 @@ func (m *KubernetesDashboard) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) - m.AuthenticationMode = &s + m.ImageFSInodesFree = &s iNdEx = postIndex - case 2: + case 4: if wireType != 2 { - return fmt.Errorf("proto: 
wrong wireType = %d for field Addon", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NodeFSAvailable", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -22528,81 +25368,28 @@ func (m *KubernetesDashboard) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Addon.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := string(dAtA[iNdEx:postIndex]) + m.NodeFSAvailable = &s iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *KubernetesInfo) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: KubernetesInfo: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: KubernetesInfo: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NodeFSInodesFree", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -22630,7 +25417,8 @@ func (m *KubernetesInfo) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Version = string(dAtA[iNdEx:postIndex]) + s := string(dAtA[iNdEx:postIndex]) + m.NodeFSInodesFree = &s iNdEx = postIndex default: iNdEx = preIndex @@ -22656,7 +25444,7 @@ func (m *KubernetesInfo) Unmarshal(dAtA []byte) error { } return nil } -func (m *KubernetesSettings) Unmarshal(dAtA []byte) error { +func (m *KubeletConfigEvictionMinimumReclaim) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -22679,15 +25467,15 @@ func (m *KubernetesSettings) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: KubernetesSettings: wiretype end group for non-group") + return fmt.Errorf("proto: KubeletConfigEvictionMinimumReclaim: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: KubernetesSettings: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: KubeletConfigEvictionMinimumReclaim: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Versions", wireType) + return fmt.Errorf("proto: wrong wireType = 
%d for field MemoryAvailable", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -22714,69 +25502,18 @@ func (m *KubernetesSettings) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Versions = append(m.Versions, ExpirableVersion{}) - if err := m.Versions[len(m.Versions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + if m.MemoryAvailable == nil { + m.MemoryAvailable = &resource.Quantity{} } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { + if err := m.MemoryAvailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LastError) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LastError: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LastError: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ImageFSAvailable", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -22786,29 +25523,33 @@ func (m *LastError) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Description = string(dAtA[iNdEx:postIndex]) + if m.ImageFSAvailable == nil { + m.ImageFSAvailable = &resource.Quantity{} + } + if err := m.ImageFSAvailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TaskID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ImageFSInodesFree", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -22818,30 +25559,33 @@ func (m *LastError) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.TaskID = &s + 
if m.ImageFSInodesFree == nil { + m.ImageFSInodesFree = &resource.Quantity{} + } + if err := m.ImageFSInodesFree.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 3: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Codes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NodeFSAvailable", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -22851,27 +25595,31 @@ func (m *LastError) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Codes = append(m.Codes, ErrorCode(dAtA[iNdEx:postIndex])) + if m.NodeFSAvailable == nil { + m.NodeFSAvailable = &resource.Quantity{} + } + if err := m.NodeFSAvailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 4: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NodeFSInodesFree", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -22898,10 +25646,10 @@ func (m *LastError) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.LastUpdateTime == nil { - m.LastUpdateTime = &v11.Time{} + if m.NodeFSInodesFree == nil { + m.NodeFSInodesFree = &resource.Quantity{} } - if err := m.LastUpdateTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.NodeFSInodesFree.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -22929,7 +25677,7 @@ func (m *LastError) Unmarshal(dAtA []byte) error { } return nil } -func (m *LastOperation) Unmarshal(dAtA []byte) error { +func (m *KubeletConfigEvictionSoftGracePeriod) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -22952,17 +25700,17 @@ func (m *LastOperation) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: LastOperation: wiretype end group for non-group") + return fmt.Errorf("proto: KubeletConfigEvictionSoftGracePeriod: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: LastOperation: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: KubeletConfigEvictionSoftGracePeriod: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MemoryAvailable", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -22972,27 +25720,31 @@ func (m *LastOperation) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return 
ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Description = string(dAtA[iNdEx:postIndex]) + if m.MemoryAvailable == nil { + m.MemoryAvailable = &v11.Duration{} + } + if err := m.MemoryAvailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ImageFSAvailable", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -23019,15 +25771,18 @@ func (m *LastOperation) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LastUpdateTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.ImageFSAvailable == nil { + m.ImageFSAvailable = &v11.Duration{} + } + if err := m.ImageFSAvailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Progress", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImageFSInodesFree", wireType) } - m.Progress = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -23037,16 +25792,33 @@ func (m *LastOperation) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Progress |= int32(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ImageFSInodesFree == nil { + m.ImageFSInodesFree = &v11.Duration{} + } + if err := m.ImageFSInodesFree.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NodeFSAvailable", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -23056,29 +25828,33 @@ func (m *LastOperation) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.State = LastOperationState(dAtA[iNdEx:postIndex]) + if m.NodeFSAvailable == nil { + m.NodeFSAvailable = &v11.Duration{} + } + if err := m.NodeFSAvailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NodeFSInodesFree", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -23088,23 +25864,27 @@ func (m *LastOperation) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return 
ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = LastOperationType(dAtA[iNdEx:postIndex]) + if m.NodeFSInodesFree == nil { + m.NodeFSInodesFree = &v11.Duration{} + } + if err := m.NodeFSInodesFree.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -23130,7 +25910,7 @@ func (m *LastOperation) Unmarshal(dAtA []byte) error { } return nil } -func (m *Machine) Unmarshal(dAtA []byte) error { +func (m *KubeletConfigReserved) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -23153,17 +25933,17 @@ func (m *Machine) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Machine: wiretype end group for non-group") + return fmt.Errorf("proto: KubeletConfigReserved: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Machine: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: KubeletConfigReserved: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CPU", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -23173,27 +25953,31 @@ func (m *Machine) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = string(dAtA[iNdEx:postIndex]) + if m.CPU == nil { + m.CPU = &resource.Quantity{} + } + if err := m.CPU.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Memory", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -23220,71 +26004,18 @@ func (m *Machine) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Image == nil { - m.Image = &ShootMachineImage{} + if m.Memory == nil { + m.Memory = &resource.Quantity{} } - if err := m.Image.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Memory.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MachineImage) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] 
- iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MachineImage: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MachineImage: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field EphemeralStorage", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -23294,27 +26025,31 @@ func (m *MachineImage) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + if m.EphemeralStorage == nil { + m.EphemeralStorage = &resource.Quantity{} + } + if err := m.EphemeralStorage.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 2: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Versions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PID", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -23341,8 +26076,10 @@ func (m *MachineImage) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Versions = append(m.Versions, ExpirableVersion{}) - if err := m.Versions[len(m.Versions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.PID == nil { + m.PID = &resource.Quantity{} + } + if err := m.PID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -23370,7 +26107,7 @@ func (m *MachineImage) Unmarshal(dAtA []byte) error { } return nil } -func (m *MachineType) Unmarshal(dAtA []byte) error { +func (m *Kubernetes) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -23393,15 +26130,36 @@ func (m *MachineType) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: MachineType: wiretype end group for non-group") + return fmt.Errorf("proto: Kubernetes: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: MachineType: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Kubernetes: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowPrivilegedContainers", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.AllowPrivilegedContainers = &b + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CPU", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ClusterAutoscaler", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -23428,13 +26186,16 @@ 
func (m *MachineType) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.CPU.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.ClusterAutoscaler == nil { + m.ClusterAutoscaler = &ClusterAutoscaler{} + } + if err := m.ClusterAutoscaler.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field GPU", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field KubeAPIServer", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -23461,13 +26222,16 @@ func (m *MachineType) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.GPU.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.KubeAPIServer == nil { + m.KubeAPIServer = &KubeAPIServerConfig{} + } + if err := m.KubeAPIServer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Memory", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field KubeControllerManager", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -23494,15 +26258,18 @@ func (m *MachineType) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Memory.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.KubeControllerManager == nil { + m.KubeControllerManager = &KubeControllerManagerConfig{} + } + if err := m.KubeControllerManager.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field KubeScheduler", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -23512,27 +26279,31 @@ func (m *MachineType) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + if m.KubeScheduler == nil { + m.KubeScheduler = &KubeSchedulerConfig{} + } + if err := m.KubeScheduler.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 5: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Storage", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field KubeProxy", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -23559,18 +26330,18 @@ func (m *MachineType) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Storage == nil { - m.Storage = &MachineTypeStorage{} + if m.KubeProxy == nil { + m.KubeProxy = &KubeProxyConfig{} } - if err := m.Storage.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.KubeProxy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Usable", wireType) + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for 
field Kubelet", wireType) } - var v int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -23580,69 +26351,31 @@ func (m *MachineType) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - b := bool(v != 0) - m.Usable = &b - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { + if postIndex > l { return io.ErrUnexpectedEOF } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MachineTypeStorage) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + if m.Kubelet == nil { + m.Kubelet = &KubeletConfig{} } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + if err := m.Kubelet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MachineTypeStorage: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MachineTypeStorage: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Class", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -23670,11 +26403,11 @@ func (m *MachineTypeStorage) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Class = string(dAtA[iNdEx:postIndex]) + m.Version = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 9: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StorageSize", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field VerticalPodAutoscaler", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -23701,41 +26434,12 @@ func (m *MachineTypeStorage) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.StorageSize.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated + if m.VerticalPodAutoscaler == nil { + m.VerticalPodAutoscaler = &VerticalPodAutoscaler{} } - if postIndex > l { - return io.ErrUnexpectedEOF + if err := m.VerticalPodAutoscaler.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.Type = string(dAtA[iNdEx:postIndex]) iNdEx = 
postIndex default: iNdEx = preIndex @@ -23761,7 +26465,7 @@ func (m *MachineTypeStorage) Unmarshal(dAtA []byte) error { } return nil } -func (m *Maintenance) Unmarshal(dAtA []byte) error { +func (m *KubernetesConfig) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -23784,15 +26488,15 @@ func (m *Maintenance) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Maintenance: wiretype end group for non-group") + return fmt.Errorf("proto: KubernetesConfig: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Maintenance: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: KubernetesConfig: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AutoUpdate", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FeatureGates", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -23819,48 +26523,91 @@ func (m *Maintenance) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.AutoUpdate == nil { - m.AutoUpdate = &MaintenanceAutoUpdate{} - } - if err := m.AutoUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TimeWindow", wireType) + if m.FeatureGates == nil { + m.FeatureGates = make(map[string]bool) } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + var mapkey string + var mapvalue bool + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapvaluetemp int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapvaluetemp |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + mapvalue = bool(mapvaluetemp != 0) + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := 
iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.TimeWindow == nil { - m.TimeWindow = &MaintenanceTimeWindow{} - } - if err := m.TimeWindow.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.FeatureGates[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -23886,7 +26633,7 @@ func (m *Maintenance) Unmarshal(dAtA []byte) error { } return nil } -func (m *MaintenanceAutoUpdate) Unmarshal(dAtA []byte) error { +func (m *KubernetesDashboard) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -23909,17 +26656,17 @@ func (m *MaintenanceAutoUpdate) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: MaintenanceAutoUpdate: wiretype end group for non-group") + return fmt.Errorf("proto: KubernetesDashboard: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: MaintenanceAutoUpdate: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: KubernetesDashboard: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field KubernetesVersion", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthenticationMode", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -23929,17 +26676,30 @@ func (m *MaintenanceAutoUpdate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.KubernetesVersion = bool(v != 0) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.AuthenticationMode = &s + iNdEx = postIndex case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MachineImageVersion", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addon", wireType) } - var v int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -23949,12 +26709,25 @@ func (m *MaintenanceAutoUpdate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - m.MachineImageVersion = bool(v != 0) + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Addon.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -23979,7 +26752,7 @@ func (m *MaintenanceAutoUpdate) Unmarshal(dAtA []byte) error { } return nil } -func (m *MaintenanceTimeWindow) Unmarshal(dAtA []byte) error { +func (m *KubernetesInfo) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -24002,47 +26775,15 @@ func (m *MaintenanceTimeWindow) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: MaintenanceTimeWindow: 
wiretype end group for non-group") + return fmt.Errorf("proto: KubernetesInfo: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: MaintenanceTimeWindow: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: KubernetesInfo: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Begin", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Begin = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field End", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -24070,7 +26811,7 @@ func (m *MaintenanceTimeWindow) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.End = string(dAtA[iNdEx:postIndex]) + m.Version = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -24096,7 +26837,7 @@ func (m *MaintenanceTimeWindow) Unmarshal(dAtA []byte) error { } return nil } -func (m *Monitoring) Unmarshal(dAtA []byte) error { +func (m *KubernetesSettings) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -24119,15 +26860,15 @@ func (m *Monitoring) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Monitoring: wiretype end group for non-group") + return fmt.Errorf("proto: KubernetesSettings: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Monitoring: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: KubernetesSettings: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Alerting", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Versions", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -24154,10 +26895,8 @@ func (m *Monitoring) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Alerting == nil { - m.Alerting = &Alerting{} - } - if err := m.Alerting.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Versions = append(m.Versions, ExpirableVersion{}) + if err := m.Versions[len(m.Versions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -24185,7 +26924,7 @@ func (m *Monitoring) Unmarshal(dAtA []byte) error { } return nil } -func (m *Networking) Unmarshal(dAtA []byte) error { +func (m *LastError) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -24208,15 +26947,15 @@ func (m *Networking) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Networking: wiretype end group for non-group") + return fmt.Errorf("proto: LastError: wiretype end group for non-group") } if fieldNum <= 0 
{ - return fmt.Errorf("proto: Networking: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: LastError: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -24244,47 +26983,11 @@ func (m *Networking) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = string(dAtA[iNdEx:postIndex]) + m.Description = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ProviderConfig", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ProviderConfig == nil { - m.ProviderConfig = &ProviderConfig{} - } - if err := m.ProviderConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pods", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TaskID", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -24313,11 +27016,11 @@ func (m *Networking) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) - m.Pods = &s + m.TaskID = &s iNdEx = postIndex - case 4: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Nodes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Codes", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -24345,14 +27048,13 @@ func (m *Networking) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.Nodes = &s + m.Codes = append(m.Codes, ErrorCode(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 5: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Services", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -24362,24 +27064,27 @@ func (m *Networking) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.Services = &s + if m.LastUpdateTime == nil { + m.LastUpdateTime = &v11.Time{} + } + if err := m.LastUpdateTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -24405,7 +27110,7 @@ func (m *Networking) Unmarshal(dAtA []byte) error { } return nil } -func (m 
*NginxIngress) Unmarshal(dAtA []byte) error { +func (m *LastOperation) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -24428,17 +27133,17 @@ func (m *NginxIngress) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NginxIngress: wiretype end group for non-group") + return fmt.Errorf("proto: LastOperation: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NginxIngress: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: LastOperation: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Addon", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -24448,30 +27153,29 @@ func (m *NginxIngress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Addon.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Description = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancerSourceRanges", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -24481,29 +27185,30 @@ func (m *NginxIngress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.LoadBalancerSourceRanges = append(m.LoadBalancerSourceRanges, string(dAtA[iNdEx:postIndex])) + if err := m.LastUpdateTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Progress", wireType) } - var msglen int + m.Progress = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -24513,124 +27218,4188 @@ func (m *NginxIngress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.Progress |= int32(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.State = LastOperationState(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = LastOperationType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Machine) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Machine: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Machine: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Image == nil { + 
m.Image = &ShootMachineImage{} + } + if err := m.Image.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MachineControllerManagerSettings) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MachineControllerManagerSettings: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MachineControllerManagerSettings: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MachineDrainTimeout", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MachineDrainTimeout == nil { + m.MachineDrainTimeout = &v11.Duration{} + } + if err := m.MachineDrainTimeout.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MachineHealthTimeout", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MachineHealthTimeout == nil { + m.MachineHealthTimeout = &v11.Duration{} + } + if err := m.MachineHealthTimeout.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MachineCreationTimeout", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MachineCreationTimeout == nil { + m.MachineCreationTimeout = &v11.Duration{} + } + if err := 
m.MachineCreationTimeout.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxEvictRetries", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.MaxEvictRetries = &v + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeConditions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeConditions = append(m.NodeConditions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MachineImage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MachineImage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MachineImage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Versions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.Versions = append(m.Versions, MachineImageVersion{}) + if err := m.Versions[len(m.Versions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MachineImageVersion) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MachineImageVersion: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MachineImageVersion: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExpirableVersion", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ExpirableVersion.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CRI", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CRI = append(m.CRI, CRI{}) + if err := m.CRI[len(m.CRI)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MachineType) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 
4 { + return fmt.Errorf("proto: MachineType: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MachineType: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CPU", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.CPU.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GPU", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.GPU.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Memory", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Memory.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Storage", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Storage == nil { + m.Storage = &MachineTypeStorage{} + } + 
if err := m.Storage.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Usable", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Usable = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MachineTypeStorage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MachineTypeStorage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MachineTypeStorage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Class", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Class = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StorageSize", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.StorageSize.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + 
return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Maintenance) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Maintenance: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Maintenance: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AutoUpdate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AutoUpdate == nil { + m.AutoUpdate = &MaintenanceAutoUpdate{} + } + if err := m.AutoUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeWindow", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TimeWindow == nil { + m.TimeWindow = &MaintenanceTimeWindow{} + } + if err := m.TimeWindow.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfineSpecUpdateRollout", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.ConfineSpecUpdateRollout = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MaintenanceAutoUpdate) Unmarshal(dAtA []byte) error { 
+ l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MaintenanceAutoUpdate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MaintenanceAutoUpdate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field KubernetesVersion", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.KubernetesVersion = bool(v != 0) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MachineImageVersion", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.MachineImageVersion = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MaintenanceTimeWindow) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MaintenanceTimeWindow: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MaintenanceTimeWindow: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Begin", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Begin = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field End", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.End = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Monitoring) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Monitoring: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Monitoring: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Alerting", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Alerting == nil { + m.Alerting = &Alerting{} + } + if err := m.Alerting.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NamedResourceReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NamedResourceReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NamedResourceReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b 
:= dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ResourceRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Networking) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Networking: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Networking: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProviderConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ProviderConfig == nil { + m.ProviderConfig = &runtime.RawExtension{} + } + if err := 
m.ProviderConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pods", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Pods = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Nodes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Nodes = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Services", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Services = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NginxIngress) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NginxIngress: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NginxIngress: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addon", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Addon.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancerSourceRanges", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LoadBalancerSourceRanges = append(m.LoadBalancerSourceRanges, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF } if m.Config == nil { m.Config = make(map[string]string) } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := 
int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Config[mapkey] = mapvalue + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExternalTrafficPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := k8s_io_api_core_v1.ServiceExternalTrafficPolicyType(dAtA[iNdEx:postIndex]) + m.ExternalTrafficPolicy = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OIDCConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OIDCConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OIDCConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CABundle", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.CABundle = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientAuthentication", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ClientAuthentication == nil { + m.ClientAuthentication = &OpenIDConnectClientAuthentication{} + } + if err := m.ClientAuthentication.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.ClientID = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GroupsClaim", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.GroupsClaim = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GroupsPrefix", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.GroupsPrefix = &s + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IssuerURL", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.IssuerURL = &s + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequiredClaims", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RequiredClaims == nil { + m.RequiredClaims = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.RequiredClaims[mapkey] = mapvalue + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SigningAlgs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SigningAlgs = append(m.SigningAlgs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UsernameClaim", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l 
{ + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.UsernameClaim = &s + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UsernamePrefix", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.UsernamePrefix = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OpenIDConnectClientAuthentication) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OpenIDConnectClientAuthentication: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OpenIDConnectClientAuthentication: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExtraConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ExtraConfig == nil { + m.ExtraConfig = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.ExtraConfig[mapkey] = mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Secret = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Plant) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Plant: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Plant: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PlantList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PlantList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PlantList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Plant{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PlantSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PlantSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PlantSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Endpoints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Endpoints = append(m.Endpoints, Endpoint{}) + if err := m.Endpoints[len(m.Endpoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + 
return nil +} +func (m *PlantStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PlantStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PlantStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, Condition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ObservedGeneration = &v + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ClusterInfo == nil { + m.ClusterInfo = &ClusterInfo{} + } + if err := m.ClusterInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Project) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Project: wiretype end group for non-group") + } + if fieldNum <= 0 { + return 
fmt.Errorf("proto: Project: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ProjectList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProjectList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProjectList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + 
break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Project{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ProjectMember) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProjectMember: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProjectMember: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subject", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Subject.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Role = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field Roles", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ProjectSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProjectSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProjectSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CreatedBy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - 
if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CreatedBy == nil { + m.CreatedBy = &v13.Subject{} + } + if err := m.CreatedBy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Description = &s + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Owner == nil { + m.Owner = &v13.Subject{} + } + if err := m.Owner.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Purpose", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Purpose = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Members = append(m.Members, ProjectMember{}) + if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Namespace = &s + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tolerations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Tolerations == nil { + m.Tolerations = &ProjectTolerations{} + } + if err := m.Tolerations.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ProjectStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProjectStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProjectStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ObservedGeneration |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Phase = ProjectPhase(dAtA[iNdEx:postIndex]) + 
iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StaleSinceTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break } } - m.Config[mapkey] = mapvalue + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.StaleSinceTimestamp == nil { + m.StaleSinceTimestamp = &v11.Time{} + } + if err := m.StaleSinceTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExternalTrafficPolicy", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field StaleAutoDeleteTimestamp", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -24640,24 +31409,27 @@ func (m *NginxIngress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := k8s_io_api_core_v1.ServiceExternalTrafficPolicyType(dAtA[iNdEx:postIndex]) - m.ExternalTrafficPolicy = &s + if m.StaleAutoDeleteTimestamp == nil { + m.StaleAutoDeleteTimestamp = &v11.Time{} + } + if err := m.StaleAutoDeleteTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -24683,7 +31455,7 @@ func (m *NginxIngress) Unmarshal(dAtA []byte) error { } return nil } -func (m *OIDCConfig) Unmarshal(dAtA []byte) error { +func (m *ProjectTolerations) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -24706,17 +31478,17 @@ func (m *OIDCConfig) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: OIDCConfig: wiretype end group for non-group") + return fmt.Errorf("proto: ProjectTolerations: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: OIDCConfig: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ProjectTolerations: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CABundle", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Defaults", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -24726,28 +31498,29 @@ func (m *OIDCConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return 
io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.CABundle = &s + m.Defaults = append(m.Defaults, Toleration{}) + if err := m.Defaults[len(m.Defaults)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClientAuthentication", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Whitelist", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -24774,16 +31547,67 @@ func (m *OIDCConfig) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ClientAuthentication == nil { - m.ClientAuthentication = &OpenIDConnectClientAuthentication{} - } - if err := m.ClientAuthentication.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Whitelist = append(m.Whitelist, Toleration{}) + if err := m.Whitelist[len(m.Whitelist)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Provider) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Provider: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Provider: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClientID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -24811,14 +31635,13 @@ func (m *OIDCConfig) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.ClientID = &s + m.Type = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 4: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field GroupsClaim", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ControlPlaneConfig", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -24828,30 +31651,33 @@ func (m *OIDCConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.GroupsClaim = &s + if m.ControlPlaneConfig == nil { + m.ControlPlaneConfig = &runtime.RawExtension{} + } + if err := 
m.ControlPlaneConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 5: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field GroupsPrefix", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field InfrastructureConfig", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -24861,30 +31687,33 @@ func (m *OIDCConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.GroupsPrefix = &s + if m.InfrastructureConfig == nil { + m.InfrastructureConfig = &runtime.RawExtension{} + } + if err := m.InfrastructureConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 6: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IssuerURL", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Workers", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -24894,28 +31723,82 @@ func (m *OIDCConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.IssuerURL = &s + m.Workers = append(m.Workers, Worker{}) + if err := m.Workers[len(m.Workers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 7: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Quota) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Quota: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Quota: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RequiredClaims", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -24942,109 +31825,15 @@ func (m 
*OIDCConfig) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.RequiredClaims == nil { - m.RequiredClaims = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.RequiredClaims[mapkey] = mapvalue iNdEx = postIndex - case 8: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SigningAlgs", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -25054,29 +31843,83 @@ func (m *OIDCConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.SigningAlgs = append(m.SigningAlgs, string(dAtA[iNdEx:postIndex])) + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 9: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 
{ + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QuotaList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QuotaList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QuotaList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UsernameClaim", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -25086,30 +31929,30 @@ func (m *OIDCConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.UsernameClaim = &s + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 10: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UsernamePrefix", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -25119,24 +31962,25 @@ func (m *OIDCConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.UsernamePrefix = &s + m.Items = append(m.Items, Quota{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -25162,7 +32006,7 @@ func (m *OIDCConfig) Unmarshal(dAtA []byte) error { } return nil } -func (m *OpenIDConnectClientAuthentication) Unmarshal(dAtA []byte) error { +func (m *QuotaSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -25185,15 +32029,35 @@ func (m *OpenIDConnectClientAuthentication) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: OpenIDConnectClientAuthentication: wiretype end group for non-group") + return fmt.Errorf("proto: QuotaSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: 
OpenIDConnectClientAuthentication: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: QuotaSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterLifetimeDays", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ClusterLifetimeDays = &v + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExtraConfig", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Metrics", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -25220,11 +32084,11 @@ func (m *OpenIDConnectClientAuthentication) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ExtraConfig == nil { - m.ExtraConfig = make(map[string]string) + if m.Metrics == nil { + m.Metrics = make(k8s_io_api_core_v1.ResourceList) } - var mapkey string - var mapvalue string + var mapkey k8s_io_api_core_v1.ResourceName + mapvalue := &resource.Quantity{} for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -25270,10 +32134,10 @@ func (m *OpenIDConnectClientAuthentication) Unmarshal(dAtA []byte) error { if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + mapkey = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postStringIndexmapkey]) iNdEx = postStringIndexmapkey } else if fieldNum == 2 { - var stringLenmapvalue uint64 + var mapmsglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -25283,24 +32147,26 @@ func (m *OpenIDConnectClientAuthentication) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { + if mapmsglen < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } - if postStringIndexmapvalue > l { + if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex } else { iNdEx = entryPreIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -25316,13 +32182,13 @@ func (m *OpenIDConnectClientAuthentication) Unmarshal(dAtA []byte) error { iNdEx += skippy } } - m.ExtraConfig[mapkey] = mapvalue + m.Metrics[k8s_io_api_core_v1.ResourceName(mapkey)] = *mapvalue iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -25332,24 +32198,24 @@ func (m *OpenIDConnectClientAuthentication) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - 
intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.Secret = &s + if err := m.Scope.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -25375,7 +32241,7 @@ func (m *OpenIDConnectClientAuthentication) Unmarshal(dAtA []byte) error { } return nil } -func (m *Plant) Unmarshal(dAtA []byte) error { +func (m *Region) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -25398,17 +32264,17 @@ func (m *Plant) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Plant: wiretype end group for non-group") + return fmt.Errorf("proto: Region: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Plant: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Region: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -25418,28 +32284,27 @@ func (m *Plant) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Zones", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -25466,13 +32331,14 @@ func (m *Plant) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Zones = append(m.Zones, AvailabilityZone{}) + if err := m.Zones[len(m.Zones)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -25499,9 +32365,103 @@ func (m *Plant) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 
0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } + m.Labels[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -25527,7 +32487,7 @@ func (m *Plant) Unmarshal(dAtA []byte) error { } return nil } -func (m *PlantList) Unmarshal(dAtA []byte) error { +func (m *ResourceWatchCacheSize) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -25550,17 +32510,17 @@ func (m *PlantList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PlantList: wiretype end group for non-group") + return fmt.Errorf("proto: ResourceWatchCacheSize: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PlantList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ResourceWatchCacheSize: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -25570,30 +32530,30 @@ func (m *PlantList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := string(dAtA[iNdEx:postIndex]) + m.APIGroup = &s iNdEx = postIndex case 2: 
if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -25603,26 +32563,43 @@ func (m *PlantList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, Plant{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Resource = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CacheSize", wireType) + } + m.CacheSize = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CacheSize |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -25647,7 +32624,7 @@ func (m *PlantList) Unmarshal(dAtA []byte) error { } return nil } -func (m *PlantSpec) Unmarshal(dAtA []byte) error { +func (m *SecretBinding) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -25670,13 +32647,46 @@ func (m *PlantSpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PlantSpec: wiretype end group for non-group") + return fmt.Errorf("proto: SecretBinding: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PlantSpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SecretBinding: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) } @@ -25709,9 +32719,9 @@ func (m *PlantSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Endpoints", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Quotas", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -25738,8 +32748,8 @@ func (m *PlantSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Endpoints = append(m.Endpoints, Endpoint{}) - if err := 
m.Endpoints[len(m.Endpoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Quotas = append(m.Quotas, v1.ObjectReference{}) + if err := m.Quotas[len(m.Quotas)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -25767,7 +32777,7 @@ func (m *PlantSpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *PlantStatus) Unmarshal(dAtA []byte) error { +func (m *SecretBindingList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -25790,15 +32800,15 @@ func (m *PlantStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PlantStatus: wiretype end group for non-group") + return fmt.Errorf("proto: SecretBindingList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PlantStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SecretBindingList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -25825,34 +32835,13 @@ func (m *PlantStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Conditions = append(m.Conditions, Condition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ObservedGeneration = &v - case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClusterInfo", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -25879,10 +32868,8 @@ func (m *PlantStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ClusterInfo == nil { - m.ClusterInfo = &ClusterInfo{} - } - if err := m.ClusterInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, SecretBinding{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -25910,7 +32897,7 @@ func (m *PlantStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *Project) Unmarshal(dAtA []byte) error { +func (m *Seed) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -25933,10 +32920,10 @@ func (m *Project) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Project: wiretype end group for non-group") + return fmt.Errorf("proto: Seed: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Project: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Seed: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -26062,7 +33049,7 @@ func (m *Project) Unmarshal(dAtA []byte) error { } return nil } -func (m 
*ProjectList) Unmarshal(dAtA []byte) error { +func (m *SeedBackup) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -26085,15 +33072,47 @@ func (m *ProjectList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ProjectList: wiretype end group for non-group") + return fmt.Errorf("proto: SeedBackup: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ProjectList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SeedBackup: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Provider", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Provider = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProviderConfig", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -26120,13 +33139,49 @@ func (m *ProjectList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.ProviderConfig == nil { + m.ProviderConfig = &runtime.RawExtension{} + } + if err := m.ProviderConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Region", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Region = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -26153,8 +33208,7 @@ func (m *ProjectList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, Project{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -26182,7 +33236,7 @@ func (m *ProjectList) Unmarshal(dAtA []byte) error { } return nil } -func (m *ProjectMember) Unmarshal(dAtA []byte) error { +func (m *SeedDNS) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -26205,48 +33259,15 @@ 
func (m *ProjectMember) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ProjectMember: wiretype end group for non-group") + return fmt.Errorf("proto: SeedDNS: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ProjectMember: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SeedDNS: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Subject", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Subject.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IngressDomain", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -26274,13 +33295,14 @@ func (m *ProjectMember) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Role = string(dAtA[iNdEx:postIndex]) + s := string(dAtA[iNdEx:postIndex]) + m.IngressDomain = &s iNdEx = postIndex - case 3: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Provider", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -26290,23 +33312,27 @@ func (m *ProjectMember) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex])) + if m.Provider == nil { + m.Provider = &SeedDNSProvider{} + } + if err := m.Provider.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -26332,7 +33358,7 @@ func (m *ProjectMember) Unmarshal(dAtA []byte) error { } return nil } -func (m *ProjectSpec) Unmarshal(dAtA []byte) error { +func (m *SeedDNSProvider) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -26355,17 +33381,17 @@ func (m *ProjectSpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ProjectSpec: wiretype end group for non-group") + return fmt.Errorf("proto: SeedDNSProvider: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ProjectSpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SeedDNSProvider: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field CreatedBy", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -26375,33 +33401,29 @@ func (m *ProjectSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.CreatedBy == nil { - m.CreatedBy = &v12.Subject{} - } - if err := m.CreatedBy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Type = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -26411,28 +33433,28 @@ func (m *ProjectSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.Description = &s + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Domains", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -26459,18 +33481,18 @@ func (m *ProjectSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Owner == nil { - m.Owner = &v12.Subject{} + if m.Domains == nil { + m.Domains = &DNSIncludeExclude{} } - if err := m.Owner.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Domains.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Purpose", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Zones", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -26480,28 +33502,84 @@ func (m *ProjectSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.Purpose = &s + if m.Zones == nil { + m.Zones = &DNSIncludeExclude{} + } + if err := m.Zones.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = 
postIndex - case 5: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SeedList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SeedList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SeedList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -26528,16 +33606,15 @@ func (m *ProjectSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Members = append(m.Members, ProjectMember{}) - if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 6: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -26547,24 +33624,25 @@ func (m *ProjectSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.Namespace = &s + m.Items = append(m.Items, Seed{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -26590,7 +33668,7 @@ func (m *ProjectSpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *ProjectStatus) Unmarshal(dAtA []byte) error { +func (m *SeedNetworks) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -26613,17 +33691,17 @@ func (m *ProjectStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ProjectStatus: wiretype end group for non-group") + return fmt.Errorf("proto: SeedNetworks: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ProjectStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SeedNetworks: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: 
wrong wireType = %d for field ObservedGeneration", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Nodes", wireType) } - m.ObservedGeneration = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -26633,14 +33711,28 @@ func (m *ProjectStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= int64(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Nodes = &s + iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Pods", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -26668,64 +33760,11 @@ func (m *ProjectStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Phase = ProjectPhase(dAtA[iNdEx:postIndex]) + m.Pods = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Provider) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Provider: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Provider: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Services", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -26753,47 +33792,11 @@ func (m *Provider) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ControlPlaneConfig", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ControlPlaneConfig == nil { - m.ControlPlaneConfig = &ProviderConfig{} - } - if err := m.ControlPlaneConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return 
err - } + m.Services = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field InfrastructureConfig", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ShootDefaults", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -26820,18 +33823,18 @@ func (m *Provider) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.InfrastructureConfig == nil { - m.InfrastructureConfig = &ProviderConfig{} + if m.ShootDefaults == nil { + m.ShootDefaults = &ShootNetworks{} } - if err := m.InfrastructureConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ShootDefaults.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Workers", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field BlockCIDRs", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -26841,25 +33844,23 @@ func (m *Provider) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Workers = append(m.Workers, Worker{}) - if err := m.Workers[len(m.Workers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.BlockCIDRs = append(m.BlockCIDRs, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex @@ -26885,7 +33886,7 @@ func (m *Provider) Unmarshal(dAtA []byte) error { } return nil } -func (m *ProviderConfig) Unmarshal(dAtA []byte) error { +func (m *SeedProvider) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -26908,15 +33909,47 @@ func (m *ProviderConfig) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ProviderConfig: wiretype end group for non-group") + return fmt.Errorf("proto: SeedProvider: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ProviderConfig: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SeedProvider: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RawExtension", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProviderConfig", wireType) } var msglen int for shift := 
uint(0); ; shift += 7 { @@ -26943,10 +33976,45 @@ func (m *ProviderConfig) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.RawExtension.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.ProviderConfig == nil { + m.ProviderConfig = &runtime.RawExtension{} + } + if err := m.ProviderConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Region", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Region = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -26971,7 +34039,7 @@ func (m *ProviderConfig) Unmarshal(dAtA []byte) error { } return nil } -func (m *Quota) Unmarshal(dAtA []byte) error { +func (m *SeedSelector) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -26994,15 +34062,15 @@ func (m *Quota) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Quota: wiretype end group for non-group") + return fmt.Errorf("proto: SeedSelector: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Quota: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SeedSelector: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LabelSelector", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -27029,15 +34097,18 @@ func (m *Quota) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.LabelSelector == nil { + m.LabelSelector = &v11.LabelSelector{} + } + if err := m.LabelSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ProviderTypes", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -27047,24 +34118,23 @@ func (m *Quota) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.ProviderTypes = append(m.ProviderTypes, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex @@ -27090,7 +34160,7 
@@ func (m *Quota) Unmarshal(dAtA []byte) error { } return nil } -func (m *QuotaList) Unmarshal(dAtA []byte) error { +func (m *SeedSettingExcessCapacityReservation) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -27113,50 +34183,17 @@ func (m *QuotaList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: QuotaList: wiretype end group for non-group") + return fmt.Errorf("proto: SeedSettingExcessCapacityReservation: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: QuotaList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SeedSettingExcessCapacityReservation: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Enabled", wireType) } - var msglen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -27166,26 +34203,12 @@ func (m *QuotaList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, Quota{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex + m.Enabled = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -27210,7 +34233,7 @@ func (m *QuotaList) Unmarshal(dAtA []byte) error { } return nil } -func (m *QuotaSpec) Unmarshal(dAtA []byte) error { +func (m *SeedSettingLoadBalancerServices) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -27233,35 +34256,15 @@ func (m *QuotaSpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: QuotaSpec: wiretype end group for non-group") + return fmt.Errorf("proto: SeedSettingLoadBalancerServices: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: QuotaSpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SeedSettingLoadBalancerServices: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ClusterLifetimeDays", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b 
:= dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ClusterLifetimeDays = &v - case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metrics", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -27288,11 +34291,11 @@ func (m *QuotaSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Metrics == nil { - m.Metrics = make(k8s_io_api_core_v1.ResourceList) + if m.Annotations == nil { + m.Annotations = make(map[string]string) } - var mapkey k8s_io_api_core_v1.ResourceName - mapvalue := &resource.Quantity{} + var mapkey string + var mapvalue string for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -27338,10 +34341,10 @@ func (m *QuotaSpec) Unmarshal(dAtA []byte) error { if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - mapkey = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) iNdEx = postStringIndexmapkey } else if fieldNum == 2 { - var mapmsglen int + var stringLenmapvalue uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -27351,26 +34354,24 @@ func (m *QuotaSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= int(b&0x7F) << shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if mapmsglen < 0 { + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { return ErrInvalidLengthGenerated } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { return ErrInvalidLengthGenerated } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF } - iNdEx = postmsgIndex + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue } else { iNdEx = entryPreIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -27386,40 +34387,7 @@ func (m *QuotaSpec) Unmarshal(dAtA []byte) error { iNdEx += skippy } } - m.Metrics[k8s_io_api_core_v1.ResourceName(mapkey)] = *mapvalue - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Scope.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Annotations[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -27445,7 +34413,7 @@ func (m *QuotaSpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *Region) Unmarshal(dAtA []byte) error { +func (m *SeedSettingScheduling) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -27468,49 +34436,17 @@ func (m *Region) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - 
return fmt.Errorf("proto: Region: wiretype end group for non-group") + return fmt.Errorf("proto: SeedSettingScheduling: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Region: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SeedSettingScheduling: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Zones", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Visible", wireType) } - var msglen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -27520,26 +34456,12 @@ func (m *Region) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Zones = append(m.Zones, AvailabilityZone{}) - if err := m.Zones[len(m.Zones)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex + m.Visible = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -27564,7 +34486,7 @@ func (m *Region) Unmarshal(dAtA []byte) error { } return nil } -func (m *SecretBinding) Unmarshal(dAtA []byte) error { +func (m *SeedSettingShootDNS) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -27587,17 +34509,17 @@ func (m *SecretBinding) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SecretBinding: wiretype end group for non-group") + return fmt.Errorf("proto: SeedSettingShootDNS: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SecretBinding: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SeedSettingShootDNS: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Enabled", wireType) } - var msglen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -27607,63 +34529,70 @@ func (m *SecretBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + m.Enabled = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { return ErrInvalidLengthGenerated 
} - postIndex := iNdEx + msglen - if postIndex < 0 { + if (iNdEx + skippy) < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SeedSettingVerticalPodAutoscaler) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if postIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Quotas", wireType) + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SeedSettingVerticalPodAutoscaler: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SeedSettingVerticalPodAutoscaler: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Enabled", wireType) } - var msglen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -27673,26 +34602,12 @@ func (m *SecretBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Quotas = append(m.Quotas, v1.ObjectReference{}) - if err := m.Quotas[len(m.Quotas)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex + m.Enabled = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -27717,7 +34632,7 @@ func (m *SecretBinding) Unmarshal(dAtA []byte) error { } return nil } -func (m *SecretBindingList) Unmarshal(dAtA []byte) error { +func (m *SeedSettings) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -27740,15 +34655,15 @@ func (m *SecretBindingList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SecretBindingList: wiretype end group for non-group") + return fmt.Errorf("proto: SeedSettings: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SecretBindingList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SeedSettings: illegal tag %d (wire type %d)", fieldNum, wire) } switch 
fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ExcessCapacityReservation", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -27775,13 +34690,16 @@ func (m *SecretBindingList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.ExcessCapacityReservation == nil { + m.ExcessCapacityReservation = &SeedSettingExcessCapacityReservation{} + } + if err := m.ExcessCapacityReservation.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Scheduling", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -27808,67 +34726,16 @@ func (m *SecretBindingList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, SecretBinding{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + if m.Scheduling == nil { + m.Scheduling = &SeedSettingScheduling{} } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { + if err := m.Scheduling.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Seed) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Seed: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Seed: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ShootDNS", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -27895,13 +34762,16 @@ func (m *Seed) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.ShootDNS == nil { + m.ShootDNS = &SeedSettingShootDNS{} + } + if err := m.ShootDNS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancerServices", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -27928,13 +34798,16 @@ func (m *Seed) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.LoadBalancerServices == nil { + 
m.LoadBalancerServices = &SeedSettingLoadBalancerServices{} + } + if err := m.LoadBalancerServices.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field VerticalPodAutoscaler", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -27961,7 +34834,10 @@ func (m *Seed) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.VerticalPodAutoscaler == nil { + m.VerticalPodAutoscaler = &SeedSettingVerticalPodAutoscaler{} + } + if err := m.VerticalPodAutoscaler.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -27989,7 +34865,7 @@ func (m *Seed) Unmarshal(dAtA []byte) error { } return nil } -func (m *SeedBackup) Unmarshal(dAtA []byte) error { +func (m *SeedSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -28012,17 +34888,17 @@ func (m *SeedBackup) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SeedBackup: wiretype end group for non-group") + return fmt.Errorf("proto: SeedSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SeedBackup: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SeedSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Provider", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Backup", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -28032,27 +34908,31 @@ func (m *SeedBackup) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Provider = string(dAtA[iNdEx:postIndex]) + if m.Backup == nil { + m.Backup = &SeedBackup{} + } + if err := m.Backup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ProviderConfig", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DNS", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -28079,18 +34959,15 @@ func (m *SeedBackup) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ProviderConfig == nil { - m.ProviderConfig = &ProviderConfig{} - } - if err := m.ProviderConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.DNS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Region", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Networks", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -28100,28 +34977,28 @@ func (m *SeedBackup) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.Region = &s + if err := m.Networks.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Provider", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -28148,68 +35025,51 @@ func (m *SeedBackup) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Provider.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) } - if skippy < 0 { + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { + if postIndex > l { return io.ErrUnexpectedEOF } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SeedDNS) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + if m.SecretRef == nil { + m.SecretRef = &v1.SecretReference{} } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SeedDNS: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SeedDNS: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IngressDomain", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Taints", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -28219,80 +35079,65 @@ func (m *SeedDNS) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if 
postIndex > l { return io.ErrUnexpectedEOF } - m.IngressDomain = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { + m.Taints = append(m.Taints, SeedTaint{}) + if err := m.Taints[len(m.Taints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - if skippy < 0 { + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Volume", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { + if postIndex > l { return io.ErrUnexpectedEOF } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SeedList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + if m.Volume == nil { + m.Volume = &SeedVolume{} } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + if err := m.Volume.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SeedList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SeedList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Settings", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -28319,13 +35164,16 @@ func (m *SeedList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.Settings == nil { + m.Settings = &SeedSettings{} + } + if err := m.Settings.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 9: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -28352,8 +35200,10 @@ func (m *SeedList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, Seed{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.Ingress == nil { + m.Ingress = &Ingress{} + } + if err := m.Ingress.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -28381,7 +35231,7 @@ func (m *SeedList) Unmarshal(dAtA []byte) error { } return nil } -func (m *SeedNetworks) Unmarshal(dAtA []byte) error { +func (m *SeedStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -28404,17 +35254,17 @@ func (m *SeedNetworks) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - 
return fmt.Errorf("proto: SeedNetworks: wiretype end group for non-group") + return fmt.Errorf("proto: SeedStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SeedNetworks: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SeedStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Nodes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Gardener", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -28424,28 +35274,31 @@ func (m *SeedNetworks) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.Nodes = &s + if m.Gardener == nil { + m.Gardener = &Gardener{} + } + if err := m.Gardener.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pods", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field KubernetesVersion", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -28473,11 +35326,65 @@ func (m *SeedNetworks) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Pods = string(dAtA[iNdEx:postIndex]) + s := string(dAtA[iNdEx:postIndex]) + m.KubernetesVersion = &s iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Services", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, Condition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ObservedGeneration |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterIdentity", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -28505,11 +35412,12 @@ func (m *SeedNetworks) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Services = string(dAtA[iNdEx:postIndex]) + s := string(dAtA[iNdEx:postIndex]) + m.ClusterIdentity = &s iNdEx = postIndex - case 4: + 
case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ShootDefaults", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -28536,18 +35444,111 @@ func (m *SeedNetworks) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ShootDefaults == nil { - m.ShootDefaults = &ShootNetworks{} + if m.Capacity == nil { + m.Capacity = make(k8s_io_api_core_v1.ResourceList) } - if err := m.ShootDefaults.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + var mapkey k8s_io_api_core_v1.ResourceName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } + m.Capacity[k8s_io_api_core_v1.ResourceName(mapkey)] = *mapvalue iNdEx = postIndex - case 5: + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BlockCIDRs", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Allocatable", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -28557,23 +35558,120 @@ func (m *SeedNetworks) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return 
io.ErrUnexpectedEOF } - m.BlockCIDRs = append(m.BlockCIDRs, string(dAtA[iNdEx:postIndex])) + if m.Allocatable == nil { + m.Allocatable = make(k8s_io_api_core_v1.ResourceList) + } + var mapkey k8s_io_api_core_v1.ResourceName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Allocatable[k8s_io_api_core_v1.ResourceName(mapkey)] = *mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -28599,7 +35697,7 @@ func (m *SeedNetworks) Unmarshal(dAtA []byte) error { } return nil } -func (m *SeedProvider) Unmarshal(dAtA []byte) error { +func (m *SeedTaint) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -28622,15 +35720,15 @@ func (m *SeedProvider) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SeedProvider: wiretype end group for non-group") + return fmt.Errorf("proto: SeedTaint: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SeedProvider: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SeedTaint: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -28658,47 +35756,11 @@ func (m *SeedProvider) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = string(dAtA[iNdEx:postIndex]) + m.Key = 
string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ProviderConfig", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ProviderConfig == nil { - m.ProviderConfig = &ProviderConfig{} - } - if err := m.ProviderConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Region", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -28726,7 +35788,8 @@ func (m *SeedProvider) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Region = string(dAtA[iNdEx:postIndex]) + s := string(dAtA[iNdEx:postIndex]) + m.Value = &s iNdEx = postIndex default: iNdEx = preIndex @@ -28752,7 +35815,7 @@ func (m *SeedProvider) Unmarshal(dAtA []byte) error { } return nil } -func (m *SeedSpec) Unmarshal(dAtA []byte) error { +func (m *SeedTemplate) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -28775,15 +35838,15 @@ func (m *SeedSpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SeedSpec: wiretype end group for non-group") + return fmt.Errorf("proto: SeedTemplate: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SeedSpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SeedTemplate: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Backup", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -28810,16 +35873,13 @@ func (m *SeedSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Backup == nil { - m.Backup = &SeedBackup{} - } - if err := m.Backup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DNS", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -28846,115 +35906,66 @@ func (m *SeedSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.DNS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Networks", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } 
- if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Networks.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { return err } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Provider", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { + if skippy < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen - if postIndex < 0 { + if (iNdEx + skippy) < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - if err := m.Provider.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SeedVolume) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if postIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - if m.SecretRef == nil { - m.SecretRef = &v1.SecretReference{} - } - if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } - iNdEx = postIndex - case 6: + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SeedVolume: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SeedVolume: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Taints", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MinimumSize", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -28981,14 +35992,16 @@ func (m *SeedSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Taints = append(m.Taints, SeedTaint{}) - if err := m.Taints[len(m.Taints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.MinimumSize == nil { + m.MinimumSize = &resource.Quantity{} + } + if err := m.MinimumSize.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 7: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Volume", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Providers", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -29015,10 
+36028,8 @@ func (m *SeedSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Volume == nil { - m.Volume = &SeedVolume{} - } - if err := m.Volume.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Providers = append(m.Providers, SeedVolumeProvider{}) + if err := m.Providers[len(m.Providers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -29046,7 +36057,7 @@ func (m *SeedSpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *SeedStatus) Unmarshal(dAtA []byte) error { +func (m *SeedVolumeProvider) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -29069,17 +36080,17 @@ func (m *SeedStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SeedStatus: wiretype end group for non-group") + return fmt.Errorf("proto: SeedVolumeProvider: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SeedStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SeedVolumeProvider: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Gardener", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Purpose", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -29089,31 +36100,27 @@ func (m *SeedStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Gardener == nil { - m.Gardener = &Gardener{} - } - if err := m.Gardener.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Purpose = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field KubernetesVersion", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -29141,14 +36148,66 @@ func (m *SeedStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.KubernetesVersion = &s + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceAccountConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 
4 { + return fmt.Errorf("proto: ServiceAccountConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceAccountConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Issuer", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -29158,31 +36217,30 @@ func (m *SeedStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Conditions = append(m.Conditions, Condition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := string(dAtA[iNdEx:postIndex]) + m.Issuer = &s iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SigningKeySecret", wireType) } - m.ObservedGeneration = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -29192,11 +36250,28 @@ func (m *SeedStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= int64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SigningKeySecret == nil { + m.SigningKeySecret = &v1.LocalObjectReference{} + } + if err := m.SigningKeySecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -29221,7 +36296,7 @@ func (m *SeedStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *SeedTaint) Unmarshal(dAtA []byte) error { +func (m *Shoot) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -29244,17 +36319,17 @@ func (m *SeedTaint) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SeedTaint: wiretype end group for non-group") + return fmt.Errorf("proto: Shoot: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SeedTaint: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Shoot: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -29264,29 +36339,30 @@ func (m *SeedTaint) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { 
break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Key = string(dAtA[iNdEx:postIndex]) + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -29296,24 +36372,57 @@ func (m *SeedTaint) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.Value = &s + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -29339,7 +36448,7 @@ func (m *SeedTaint) Unmarshal(dAtA []byte) error { } return nil } -func (m *SeedVolume) Unmarshal(dAtA []byte) error { +func (m *ShootList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -29362,15 +36471,15 @@ func (m *SeedVolume) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SeedVolume: wiretype end group for non-group") + return fmt.Errorf("proto: ShootList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SeedVolume: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ShootList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MinimumSize", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -29397,16 +36506,13 @@ func (m *SeedVolume) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.MinimumSize == nil { - m.MinimumSize = &resource.Quantity{} - } - if err := m.MinimumSize.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Providers", wireType) + 
return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -29433,8 +36539,8 @@ func (m *SeedVolume) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Providers = append(m.Providers, SeedVolumeProvider{}) - if err := m.Providers[len(m.Providers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, Shoot{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -29462,7 +36568,7 @@ func (m *SeedVolume) Unmarshal(dAtA []byte) error { } return nil } -func (m *SeedVolumeProvider) Unmarshal(dAtA []byte) error { +func (m *ShootMachineImage) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -29485,15 +36591,15 @@ func (m *SeedVolumeProvider) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SeedVolumeProvider: wiretype end group for non-group") + return fmt.Errorf("proto: ShootMachineImage: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SeedVolumeProvider: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ShootMachineImage: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Purpose", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -29521,11 +36627,47 @@ func (m *SeedVolumeProvider) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Purpose = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ProviderConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ProviderConfig == nil { + m.ProviderConfig = &runtime.RawExtension{} + } + if err := m.ProviderConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -29553,7 +36695,8 @@ func (m *SeedVolumeProvider) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + s := string(dAtA[iNdEx:postIndex]) + m.Version = &s iNdEx = postIndex default: iNdEx = preIndex @@ -29579,7 +36722,7 @@ func (m *SeedVolumeProvider) Unmarshal(dAtA []byte) error { } return nil } -func (m *ServiceAccountConfig) Unmarshal(dAtA []byte) error { +func (m *ShootNetworks) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -29602,15 +36745,15 @@ func (m *ServiceAccountConfig) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - 
return fmt.Errorf("proto: ServiceAccountConfig: wiretype end group for non-group") + return fmt.Errorf("proto: ShootNetworks: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ServiceAccountConfig: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ShootNetworks: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Issuer", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Pods", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -29639,13 +36782,13 @@ func (m *ServiceAccountConfig) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) - m.Issuer = &s + m.Pods = &s iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SigningKeySecret", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Services", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -29655,27 +36798,24 @@ func (m *ServiceAccountConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.SigningKeySecret == nil { - m.SigningKeySecret = &v1.LocalObjectReference{} - } - if err := m.SigningKeySecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := string(dAtA[iNdEx:postIndex]) + m.Services = &s iNdEx = postIndex default: iNdEx = preIndex @@ -29701,7 +36841,7 @@ func (m *ServiceAccountConfig) Unmarshal(dAtA []byte) error { } return nil } -func (m *Shoot) Unmarshal(dAtA []byte) error { +func (m *ShootSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -29724,15 +36864,15 @@ func (m *Shoot) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Shoot: wiretype end group for non-group") + return fmt.Errorf("proto: ShootSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Shoot: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ShootSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Addons", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -29759,13 +36899,48 @@ func (m *Shoot) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.Addons == nil { + m.Addons = &Addons{} + } + if err := m.Addons.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CloudProfileName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated 
+ } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CloudProfileName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DNS", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -29792,13 +36967,16 @@ func (m *Shoot) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.DNS == nil { + m.DNS = &DNS{} + } + if err := m.DNS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Extensions", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -29825,66 +37003,50 @@ func (m *Shoot) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Extensions = append(m.Extensions, Extension{}) + if err := m.Extensions[len(m.Extensions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hibernation", wireType) } - if skippy < 0 { + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { + if postIndex > l { return io.ErrUnexpectedEOF } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ShootList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + if m.Hibernation == nil { + m.Hibernation = &Hibernation{} } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + if err := m.Hibernation.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ShootList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ShootList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Kubernetes", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -29911,13 +37073,13 @@ func (m *ShootList) Unmarshal(dAtA 
[]byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Kubernetes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Networking", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -29944,69 +37106,51 @@ func (m *ShootList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, Shoot{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Networking.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Maintenance", wireType) } - if skippy < 0 { + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { + if postIndex > l { return io.ErrUnexpectedEOF } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ShootMachineImage) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + if m.Maintenance == nil { + m.Maintenance = &Maintenance{} } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + if err := m.Maintenance.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ShootMachineImage: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ShootMachineImage: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 9: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Monitoring", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -30016,27 +37160,31 @@ func (m *ShootMachineImage) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + if m.Monitoring == nil { + m.Monitoring = &Monitoring{} + } + if err := m.Monitoring.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 
2: + case 10: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ProviderConfig", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Provider", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -30063,16 +37211,13 @@ func (m *ShootMachineImage) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ProviderConfig == nil { - m.ProviderConfig = &ProviderConfig{} - } - if err := m.ProviderConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Provider.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: + case 11: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Purpose", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -30100,65 +37245,44 @@ func (m *ShootMachineImage) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.Version = &s + s := ShootPurpose(dAtA[iNdEx:postIndex]) + m.Purpose = &s iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Region", wireType) } - if skippy < 0 { - return ErrInvalidLengthGenerated + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if (iNdEx + skippy) < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ShootNetworks) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ShootNetworks: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ShootNetworks: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.Region = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 13: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pods", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SecretBindingName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -30186,12 +37310,11 @@ func (m *ShootNetworks) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.Pods = &s + m.SecretBindingName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 14: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Services", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SeedName", wireType) } var stringLen 
uint64 for shift := uint(0); ; shift += 7 { @@ -30220,64 +37343,47 @@ func (m *ShootNetworks) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) - m.Services = &s + m.SeedName = &s iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SeedSelector", wireType) } - if skippy < 0 { + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { + if postIndex > l { return io.ErrUnexpectedEOF } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ShootSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + if m.SeedSelector == nil { + m.SeedSelector = &SeedSelector{} } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + if err := m.SeedSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ShootSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ShootSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 16: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Addons", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -30304,18 +37410,16 @@ func (m *ShootSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Addons == nil { - m.Addons = &Addons{} - } - if err := m.Addons.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Resources = append(m.Resources, NamedResourceReference{}) + if err := m.Resources[len(m.Resources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 17: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CloudProfileName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Tolerations", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -30325,27 +37429,82 @@ func (m *ShootSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.CloudProfileName = string(dAtA[iNdEx:postIndex]) + m.Tolerations = append(m.Tolerations, Toleration{}) + if err := 
m.Tolerations[len(m.Tolerations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 3: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ShootStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ShootStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ShootStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DNS", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -30372,16 +37531,14 @@ func (m *ShootSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.DNS == nil { - m.DNS = &DNS{} - } - if err := m.DNS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Conditions = append(m.Conditions, Condition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Extensions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Constraints", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -30408,14 +37565,14 @@ func (m *ShootSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Extensions = append(m.Extensions, Extension{}) - if err := m.Extensions[len(m.Extensions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Constraints = append(m.Constraints, Condition{}) + if err := m.Constraints[len(m.Constraints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 5: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hibernation", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Gardener", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -30442,18 +37599,15 @@ func (m *ShootSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Hibernation == nil { - m.Hibernation = &Hibernation{} - } - if err := m.Hibernation.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Gardener.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kubernetes", wireType) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IsHibernated", wireType) } - var msglen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -30463,28 +37617,15 @@ func (m *ShootSpec) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Kubernetes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: + m.IsHibernated = bool(v != 0) + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Networking", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LastOperation", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -30511,13 +37652,16 @@ func (m *ShootSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Networking.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.LastOperation == nil { + m.LastOperation = &LastOperation{} + } + if err := m.LastOperation.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 8: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Maintenance", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LastErrors", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -30544,18 +37688,16 @@ func (m *ShootSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Maintenance == nil { - m.Maintenance = &Maintenance{} - } - if err := m.Maintenance.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.LastErrors = append(m.LastErrors, LastError{}) + if err := m.LastErrors[len(m.LastErrors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Monitoring", wireType) + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) } - var msglen int + m.ObservedGeneration = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -30565,31 +37707,14 @@ func (m *ShootSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.ObservedGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Monitoring == nil { - m.Monitoring = &Monitoring{} - } - if err := m.Monitoring.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 10: + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Provider", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RetryCycleStartTime", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -30616,13 +37741,16 @@ func (m *ShootSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Provider.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.RetryCycleStartTime == nil { + m.RetryCycleStartTime = &v11.Time{} + } + if err := m.RetryCycleStartTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 11: + case 9: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Purpose", wireType) + return fmt.Errorf("proto: 
wrong wireType = %d for field SeedName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -30650,12 +37778,12 @@ func (m *ShootSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := ShootPurpose(dAtA[iNdEx:postIndex]) - m.Purpose = &s + s := string(dAtA[iNdEx:postIndex]) + m.SeedName = &s iNdEx = postIndex - case 12: + case 10: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Region", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TechnicalID", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -30683,11 +37811,11 @@ func (m *ShootSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Region = string(dAtA[iNdEx:postIndex]) + m.TechnicalID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 13: + case 11: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretBindingName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -30715,11 +37843,11 @@ func (m *ShootSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.SecretBindingName = string(dAtA[iNdEx:postIndex]) + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 14: + case 12: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SeedName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ClusterIdentity", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -30748,7 +37876,7 @@ func (m *ShootSpec) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) - m.SeedName = &s + m.ClusterIdentity = &s iNdEx = postIndex default: iNdEx = preIndex @@ -30774,7 +37902,7 @@ func (m *ShootSpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *ShootStatus) Unmarshal(dAtA []byte) error { +func (m *Toleration) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -30797,17 +37925,17 @@ func (m *ShootStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ShootStatus: wiretype end group for non-group") + return fmt.Errorf("proto: Toleration: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ShootStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Toleration: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -30817,31 +37945,29 @@ func (m *ShootStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Conditions = append(m.Conditions, Condition{}) - if err := 
m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Key = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Constraints", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -30851,62 +37977,81 @@ func (m *ShootStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Constraints = append(m.Constraints, Condition{}) - if err := m.Constraints[len(m.Constraints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := string(dAtA[iNdEx:postIndex]) + m.Value = &s iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Gardener", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - if msglen < 0 { + if skippy < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen - if postIndex < 0 { + if (iNdEx + skippy) < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - if err := m.Gardener.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VerticalPodAutoscaler) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - iNdEx = postIndex - case 4: + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VerticalPodAutoscaler: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VerticalPodAutoscaler: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IsHibernated", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Enabled", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -30923,10 +38068,10 @@ func (m *ShootStatus) Unmarshal(dAtA []byte) error { break } } - m.IsHibernated = bool(v != 0) - case 5: + m.Enabled = bool(v != 0) + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastOperation", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field EvictAfterOOMThreshold", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -30953,18 +38098,18 @@ func (m *ShootStatus) Unmarshal(dAtA 
[]byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.LastOperation == nil { - m.LastOperation = &LastOperation{} + if m.EvictAfterOOMThreshold == nil { + m.EvictAfterOOMThreshold = &v11.Duration{} } - if err := m.LastOperation.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.EvictAfterOOMThreshold.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastErrors", wireType) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EvictionRateBurst", wireType) } - var msglen int + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -30974,31 +38119,53 @@ func (m *ShootStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated + m.EvictionRateBurst = &v + case 4: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field EvictionRateLimit", wireType) } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF } - if postIndex > l { + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.EvictionRateLimit = &v2 + case 5: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field EvictionTolerance", wireType) + } + var v uint64 + if (iNdEx + 8) > l { return io.ErrUnexpectedEOF } - m.LastErrors = append(m.LastErrors, LastError{}) - if err := m.LastErrors[len(m.LastErrors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.EvictionTolerance = &v2 + case 6: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field RecommendationMarginFraction", wireType) } - iNdEx = postIndex + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.RecommendationMarginFraction = &v2 case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdaterInterval", wireType) } - m.ObservedGeneration = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -31008,14 +38175,31 @@ func (m *ShootStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= int64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.UpdaterInterval == nil { + m.UpdaterInterval = &v11.Duration{} + } + if err := m.UpdaterInterval.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RetryCycleStartTime", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RecommenderInterval", wireType) } var msglen int for shift := uint(0); ; 
shift += 7 { @@ -31042,16 +38226,69 @@ func (m *ShootStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.RetryCycleStartTime == nil { - m.RetryCycleStartTime = &v11.Time{} + if m.RecommenderInterval == nil { + m.RecommenderInterval = &v11.Duration{} } - if err := m.RetryCycleStartTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.RecommenderInterval.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 9: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Volume) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Volume: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Volume: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SeedName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -31080,11 +38317,11 @@ func (m *ShootStatus) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) - m.SeedName = &s + m.Name = &s iNdEx = postIndex - case 10: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TechnicalID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -31112,11 +38349,12 @@ func (m *ShootStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.TechnicalID = string(dAtA[iNdEx:postIndex]) + s := string(dAtA[iNdEx:postIndex]) + m.Type = &s iNdEx = postIndex - case 11: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field VolumeSize", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -31144,8 +38382,29 @@ func (m *ShootStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + m.VolumeSize = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Encrypted", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Encrypted = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -31170,7 +38429,7 @@ func (m *ShootStatus) Unmarshal(dAtA []byte) error { } return 
nil } -func (m *Volume) Unmarshal(dAtA []byte) error { +func (m *VolumeType) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -31193,15 +38452,15 @@ func (m *Volume) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Volume: wiretype end group for non-group") + return fmt.Errorf("proto: VolumeType: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Volume: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: VolumeType: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Class", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -31229,12 +38488,11 @@ func (m *Volume) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.Name = &s + m.Class = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -31262,44 +38520,11 @@ func (m *Volume) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.Type = &s + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeSize", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.VolumeSize = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Encrypted", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Usable", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -31317,7 +38542,7 @@ func (m *Volume) Unmarshal(dAtA []byte) error { } } b := bool(v != 0) - m.Encrypted = &b + m.Usable = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -31342,7 +38567,7 @@ func (m *Volume) Unmarshal(dAtA []byte) error { } return nil } -func (m *VolumeType) Unmarshal(dAtA []byte) error { +func (m *WatchCacheSizes) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -31365,17 +38590,17 @@ func (m *VolumeType) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: VolumeType: wiretype end group for non-group") + return fmt.Errorf("proto: WatchCacheSizes: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: VolumeType: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: WatchCacheSizes: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field Class", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Default", wireType) } - var stringLen uint64 + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -31385,29 +38610,17 @@ func (m *VolumeType) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Class = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex + m.Default = &v case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -31417,45 +38630,26 @@ func (m *VolumeType) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Usable", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + m.Resources = append(m.Resources, ResourceWatchCacheSize{}) + if err := m.Resources[len(m.Resources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - b := bool(v != 0) - m.Usable = &b + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -32073,7 +39267,7 @@ func (m *Worker) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.ProviderConfig == nil { - m.ProviderConfig = &ProviderConfig{} + m.ProviderConfig = &runtime.RawExtension{} } if err := m.ProviderConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -32178,7 +39372,7 @@ func (m *Worker) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.DataVolumes = append(m.DataVolumes, Volume{}) + m.DataVolumes = append(m.DataVolumes, DataVolume{}) if err := m.DataVolumes[len(m.DataVolumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -32248,6 +39442,78 @@ func (m *Worker) Unmarshal(dAtA []byte) error { } m.Zones = append(m.Zones, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex + case 18: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SystemComponents", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + 
msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SystemComponents == nil { + m.SystemComponents = &WorkerSystemComponents{} + } + if err := m.SystemComponents.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 19: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MachineControllerManagerSettings", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MachineControllerManagerSettings == nil { + m.MachineControllerManagerSettings = &MachineControllerManagerSettings{} + } + if err := m.MachineControllerManagerSettings.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -32361,6 +39627,79 @@ func (m *WorkerKubernetes) Unmarshal(dAtA []byte) error { } return nil } +func (m *WorkerSystemComponents) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WorkerSystemComponents: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WorkerSystemComponents: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Allow", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Allow = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/generated.proto b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/generated.proto index 41be5a8d0..d6bc19e90 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/generated.proto +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. 
This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -20,6 +20,7 @@ syntax = 'proto2'; package github.com.gardener.gardener.pkg.apis.core.v1beta1; +import "k8s.io/api/autoscaling/v1/generated.proto"; import "k8s.io/api/core/v1/generated.proto"; import "k8s.io/api/rbac/v1/generated.proto"; import "k8s.io/apimachinery/pkg/api/resource/generated.proto"; @@ -55,7 +56,7 @@ message AdmissionPlugin { // Config is the configuration of the plugin. // +optional - optional ProviderConfig config = 2; + optional k8s.io.apimachinery.pkg.runtime.RawExtension config = 2; } // Alerting contains information about how alerting will be done (i.e. who will receive alerts and how). @@ -132,7 +133,7 @@ message BackupBucketSpec { // ProviderConfig is the configuration passed to BackupBucket resource. // +optional - optional ProviderConfig providerConfig = 2; + optional k8s.io.apimachinery.pkg.runtime.RawExtension providerConfig = 2; // SecretRef is a reference to a secret that contains the credentials to access object store. optional k8s.io.api.core.v1.SecretReference secretRef = 3; @@ -146,7 +147,7 @@ message BackupBucketSpec { message BackupBucketStatus { // ProviderStatus is the configuration passed to BackupBucket resource. // +optional - optional ProviderConfig providerStatus = 1; + optional k8s.io.apimachinery.pkg.runtime.RawExtension providerStatus = 1; // LastOperation holds information about the last operation on the BackupBucket. // +optional @@ -279,7 +280,7 @@ message CloudProfileSpec { // ProviderConfig contains provider-specific configuration for the profile. // +optional - optional ProviderConfig providerConfig = 5; + optional k8s.io.apimachinery.pkg.runtime.RawExtension providerConfig = 5; // Regions contains constraints regarding allowed values for regions and zones. // +patchMergeKey=name @@ -289,8 +290,10 @@ message CloudProfileSpec { // SeedSelector contains an optional list of labels on `Seed` resources that marks those seeds whose shoots may use this provider profile. // An empty list means that all seeds of the same provider type are supported. // This is useful for environments that are of the same type (like openstack) but may have different "instances"/landscapes. + // Optionally a list of possible providers can be added to enable cross-provider scheduling. By default, the provider + // type of the seed must match the shoot's provider. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector seedSelector = 7; + optional SeedSelector seedSelector = 7; // Type is the name of the provider. optional string type = 8; @@ -302,7 +305,7 @@ message CloudProfileSpec { repeated VolumeType volumeTypes = 9; } -// ClusterAutoscaler contains the configration flags for the Kubernetes cluster autoscaler. +// ClusterAutoscaler contains the configuration flags for the Kubernetes cluster autoscaler. message ClusterAutoscaler { // ScaleDownDelayAfterAdd defines how long after scale up that scale down evaluation resumes (default: 1 hour). // +optional @@ -357,6 +360,10 @@ message Condition { // A human readable message indicating details about the transition. optional string message = 6; + + // Well-defined error codes in case the condition reports a problem. 
+ // +optional + repeated string codes = 7; } // ContainerRuntime contains information about worker's available container runtime @@ -366,7 +373,7 @@ message ContainerRuntime { // ProviderConfig is the configuration passed to container runtime resource. // +optional - optional ProviderConfig providerConfig = 2; + optional k8s.io.apimachinery.pkg.runtime.RawExtension providerConfig = 2; } // ControllerDeployment contains information for how this controller is deployed. @@ -376,7 +383,17 @@ message ControllerDeployment { // ProviderConfig contains type-specific configuration. // +optional - optional ProviderConfig providerConfig = 2; + optional k8s.io.apimachinery.pkg.runtime.RawExtension providerConfig = 2; + + // Policy controls how the controller is deployed. It defaults to 'OnDemand'. + // +optional + optional string policy = 3; + + // SeedSelector contains an optional label selector for seeds. Only if the labels match then this controller will be + // considered for a deployment. + // An empty list means that all seeds are selected. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector seedSelector = 4; } // ControllerInstallation represents an installation request for an external controller. @@ -420,7 +437,7 @@ message ControllerInstallationStatus { // ProviderStatus contains type-specific status. // +optional - optional ProviderConfig providerStatus = 2; + optional k8s.io.apimachinery.pkg.runtime.RawExtension providerStatus = 2; } // ControllerRegistration represents a registration of an external controller. @@ -446,6 +463,7 @@ message ControllerRegistrationList { message ControllerRegistrationSpec { // Resources is a list of combinations of kinds (DNSProvider, Infrastructure, Generic, ...) and their actual types // (aws-route53, gcp, auditlog, ...). + // +optional repeated ControllerResource resources = 1; // Deployment contains information for how this controller is deployed. @@ -469,12 +487,18 @@ message ControllerResource { // ReconcileTimeout defines how long Gardener should wait for the resource reconciliation. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration reconcileTimeout = 4; + + // Primary determines if the controller backed by this ControllerRegistration is responsible for the extension + // resource's lifecycle. This field defaults to true. There must be exactly one primary controller for this kind/type + // combination. + // +optional + optional bool primary = 5; } // DNS holds information about the provider, the hosted zone id and the domain. message DNS { // Domain is the external available domain of the Shoot cluster. This domain will be written into the - // kubeconfig that is handed out to end-users. + // kubeconfig that is handed out to end-users. Once set it is immutable. // +optional optional string domain = 1; @@ -522,6 +546,23 @@ message DNSProvider { optional DNSIncludeExclude zones = 5; } +// DataVolume contains information about a data volume. +message DataVolume { + // Name of the volume to make it referencable. + optional string name = 1; + + // Type is the type of the volume. + // +optional + optional string type = 2; + + // VolumeSize is the size of the volume. + optional string size = 3; + + // Encrypted determines if the volume should be encrypted. + // +optional + optional bool encrypted = 4; +} + // Endpoint is an endpoint for monitoring, logging and other services around the plant. 
message Endpoint { // Name is the name of the endpoint @@ -555,7 +596,11 @@ message Extension { // ProviderConfig is the configuration passed to extension resource. // +optional - optional ProviderConfig providerConfig = 2; + optional k8s.io.apimachinery.pkg.runtime.RawExtension providerConfig = 2; + + // Disabled allows to disable extensions that were marked as 'globally enabled' by Gardener administrators. + // +optional + optional bool disabled = 3; } // Gardener holds the information about the Gardener version that operated a resource. @@ -631,6 +676,26 @@ message HorizontalPodAutoscalerConfig { optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration upscaleDelay = 7; } +// Ingress configures the Ingress specific settings of the Seed cluster +message Ingress { + // Domain specifies the IngressDomain of the Seed cluster pointing to the ingress controller endpoint. It will be used + // to construct ingress URLs for system applications running in Shoot clusters. Once set this field is immutable. + optional string domain = 1; + + // Controller configures a Gardener managed Ingress Controller listening on the ingressDomain + optional IngressController controller = 2; +} + +// IngressController enables a Gardener managed Ingress Controller listening on the ingressDomain +message IngressController { + // Kind defines which kind of IngressController to use, for example `nginx` + optional string kind = 1; + + // ProviderConfig specifies infrastructure specific configuration for the ingressController + // +optional + optional k8s.io.apimachinery.pkg.runtime.RawExtension providerConfig = 2; +} + // KubeAPIServerConfig contains configuration settings for the kube-apiserver. message KubeAPIServerConfig { optional KubernetesConfig kubernetesConfig = 1; @@ -668,6 +733,33 @@ message KubeAPIServerConfig { // of the kube-apiserver. // +optional optional ServiceAccountConfig serviceAccountConfig = 8; + + // WatchCacheSizes contains configuration of the API server's watch cache sizes. + // Configuring these flags might be useful for large-scale Shoot clusters with a lot of parallel update requests + // and a lot of watching controllers (e.g. large shooted Seed clusters). When the API server's watch cache's + // capacity is too small to cope with the amount of update requests and watchers for a particular resource, it + // might happen that controller watches are permanently stopped with `too old resource version` errors. + // Starting from kubernetes v1.19, the API server's watch cache size is adapted dynamically and setting the watch + // cache size flags will have no effect, except when setting it to 0 (which disables the watch cache). + // +optional + optional WatchCacheSizes watchCacheSizes = 9; + + // Requests contains configuration for request-specific settings for the kube-apiserver. + // +optional + optional KubeAPIServerRequests requests = 10; +} + +// KubeAPIServerRequests contains configuration for request-specific settings for the kube-apiserver. +message KubeAPIServerRequests { + // MaxNonMutatingInflight is the maximum number of non-mutating requests in flight at a given time. When the server + // exceeds this, it rejects requests. + // +optional + optional int32 maxNonMutatingInflight = 1; + + // MaxMutatingInflight is the maximum number of mutating requests in flight at a given time. When the server + // exceeds this, it rejects requests. + // +optional + optional int32 maxMutatingInflight = 2; } // KubeControllerManagerConfig contains configuration settings for the kube-controller-manager. 
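The hunk above introduces the WatchCacheSizes and KubeAPIServerRequests messages for the Shoot's kube-apiserver section. As an editorial illustration (not part of the patch), the following Go sketch shows how these settings might be populated, assuming the generated Go structs mirror the proto fields shown above (Default/Resources/CacheSize for the watch cache, MaxNonMutatingInflight/MaxMutatingInflight for requests); all values are made up.

package main

import (
	"fmt"

	gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
	"k8s.io/utils/pointer"
)

func main() {
	// Illustrative kube-apiserver settings using the new watch cache and
	// in-flight request fields; the numbers below are examples only.
	apiServer := &gardencorev1beta1.KubeAPIServerConfig{
		WatchCacheSizes: &gardencorev1beta1.WatchCacheSizes{
			// Corresponds to the --default-watch-cache-size flag.
			Default: pointer.Int32Ptr(100),
			// Corresponds to the --watch-cache-sizes flag, per resource.
			Resources: []gardencorev1beta1.ResourceWatchCacheSize{
				{Resource: "secrets", CacheSize: 500},
				{APIGroup: pointer.StringPtr("apps"), Resource: "deployments", CacheSize: 200},
			},
		},
		Requests: &gardencorev1beta1.KubeAPIServerRequests{
			MaxNonMutatingInflight: pointer.Int32Ptr(400),
			MaxMutatingInflight:    pointer.Int32Ptr(200),
		},
	}
	fmt.Printf("%+v\n", apiServer)
}

Note that, per the comment in the proto above, the watch cache size flags have no effect from Kubernetes v1.19 onwards except when set to 0, so such overrides are mainly relevant for large, pre-1.19 shooted Seed clusters.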
@@ -681,6 +773,10 @@ message KubeControllerManagerConfig { // NodeCIDRMaskSize defines the mask size for node cidr in cluster (default is 24) // +optional optional int32 nodeCIDRMaskSize = 3; + + // PodEvictionTimeout defines the grace period for deleting pods on failed nodes. Defaults to 2m. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration podEvictionTimeout = 4; } // KubeProxyConfig contains configuration settings for the kube-proxy. @@ -696,6 +792,13 @@ message KubeProxyConfig { // KubeSchedulerConfig contains configuration settings for the kube-scheduler. message KubeSchedulerConfig { optional KubernetesConfig kubernetesConfig = 1; + + // KubeMaxPDVols allows to configure the `KUBE_MAX_PD_VOLS` environment variable for the kube-scheduler. + // Please find more information here: https://kubernetes.io/docs/concepts/storage/storage-limits/#custom-limits + // Note that using this field is considered alpha-/experimental-level and is on your own risk. You should be aware + // of all the side-effects and consequences when changing it. + // +optional + optional string kubeMaxPDVols = 2; } // KubeletConfig contains configuration settings for the kubelet. @@ -768,6 +871,21 @@ message KubeletConfig { // +optional // Default: 1m optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration imagePullProgressDeadline = 12; + + // FailSwapOn makes the Kubelet fail to start if swap is enabled on the node. (default true). + // +optional + optional bool failSwapOn = 13; + + // KubeReserved is the configuration for resources reserved for kubernetes node components (mainly kubelet and container runtime). + // When updating these values, be aware that cgroup resizes may not succeed on active worker nodes. Look for the NodeAllocatableEnforced event to determine if the configuration was applied. + // +optional + // Default: cpu=80m,memory=1Gi,pid=20k + optional KubeletConfigReserved kubeReserved = 14; + + // SystemReserved is the configuration for resources reserved for system processes not managed by kubernetes (e.g. journald). + // When updating these values, be aware that cgroup resizes may not succeed on active worker nodes. Look for the NodeAllocatableEnforced event to determine if the configuration was applied. + // +optional + optional KubeletConfigReserved systemReserved = 15; } // KubeletConfigEviction contains kubelet eviction thresholds supporting either a resource.Quantity or a percentage based value. @@ -839,13 +957,33 @@ message KubeletConfigEvictionSoftGracePeriod { optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration nodeFSInodesFree = 5; } +// KubeletConfigReserved contains reserved resources for daemons +message KubeletConfigReserved { + // CPU is the reserved cpu. + // +optional + optional k8s.io.apimachinery.pkg.api.resource.Quantity cpu = 1; + + // Memory is the reserved memory. + // +optional + optional k8s.io.apimachinery.pkg.api.resource.Quantity memory = 2; + + // EphemeralStorage is the reserved ephemeral-storage. + // +optional + optional k8s.io.apimachinery.pkg.api.resource.Quantity ephemeralStorage = 3; + + // PID is the reserved process-ids. + // To reserve PID, the SupportNodePidsLimit feature gate must be enabled in Kubernetes versions < 1.15. + // +optional + optional k8s.io.apimachinery.pkg.api.resource.Quantity pid = 4; +} + // Kubernetes contains the version and configuration variables for the Shoot control plane. message Kubernetes { // AllowPrivilegedContainers indicates whether privileged containers are allowed in the Shoot (default: true). 
// +optional optional bool allowPrivilegedContainers = 1; - // ClusterAutoscaler contains the configration flags for the Kubernetes cluster autoscaler. + // ClusterAutoscaler contains the configuration flags for the Kubernetes cluster autoscaler. // +optional optional ClusterAutoscaler clusterAutoscaler = 2; @@ -871,6 +1009,10 @@ message Kubernetes { // Version is the semantic Kubernetes version to use for the Shoot cluster. optional string version = 8; + + // VerticalPodAutoscaler contains the configuration flags for the Kubernetes vertical pod autoscaler. + // +optional + optional VerticalPodAutoscaler verticalPodAutoscaler = 9; } // KubernetesConfig contains common configuration fields for the control plane components. @@ -952,15 +1094,47 @@ message Machine { optional ShootMachineImage image = 2; } +// MachineControllerManagerSettings contains configurations for different worker-pools. Eg. MachineDrainTimeout, MachineHealthTimeout. +message MachineControllerManagerSettings { + // MachineDrainTimeout is the period after which machine is forcefully deleted. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration machineDrainTimeout = 1; + + // MachineHealthTimeout is the period after which machine is declared failed. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration machineHealthTimeout = 2; + + // MachineCreationTimeout is the period after which creation of the machine is declared failed. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration machineCreationTimeout = 3; + + // MaxEvictRetries are the number of eviction retries on a pod after which drain is declared failed, and forceful deletion is triggered. + // +optional + optional int32 maxEvictRetries = 4; + + // NodeConditions are the set of conditions if set to true for the period of MachineHealthTimeout, machine will be declared failed. + // +optional + repeated string nodeConditions = 5; +} + // MachineImage defines the name and multiple versions of the machine image in any environment. message MachineImage { // Name is the name of the image. optional string name = 1; - // Versions contains versions and expiration dates of the machine image + // Versions contains versions, expiration dates and container runtimes of the machine image // +patchMergeKey=version // +patchStrategy=merge - repeated ExpirableVersion versions = 2; + repeated MachineImageVersion versions = 2; +} + +// MachineImageVersion is an expirable version with list of supported container runtimes and interfaces +message MachineImageVersion { + optional ExpirableVersion expirableVersion = 1; + + // CRI list of supported container runtime and interfaces supported by this version + // +optional + repeated CRI cri = 2; } // MachineType contains certain properties of a machine type. @@ -1008,6 +1182,12 @@ message Maintenance { // TimeWindow contains information about the time window for maintenance operations. // +optional optional MaintenanceTimeWindow timeWindow = 2; + + // ConfineSpecUpdateRollout prevents that changes/updates to the shoot specification will be rolled out immediately. + // Instead, they are rolled out during the shoot's maintenance time window. There is one exception that will trigger + // an immediate roll out which is changes to the Spec.Hibernation.Enabled field. + // +optional + optional bool confineSpecUpdateRollout = 3; } // MaintenanceAutoUpdate contains information about which constraints should be automatically updated. 
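The MachineControllerManagerSettings message added above carries per-worker-pool timeouts for the machine-controller-manager; it is attached to a pool via the machineControllerManager field of the Worker message further below (field 19). A minimal Go sketch of such a configuration follows, assuming the generated Go struct fields mirror the proto names; the durations and conditions are illustrative, not recommended defaults.

package main

import (
	"fmt"
	"time"

	gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/utils/pointer"
)

func main() {
	// Example per-worker-pool machine lifecycle settings; every field is optional.
	mcmSettings := &gardencorev1beta1.MachineControllerManagerSettings{
		MachineDrainTimeout:    &metav1.Duration{Duration: 2 * time.Hour},
		MachineHealthTimeout:   &metav1.Duration{Duration: 10 * time.Minute},
		MachineCreationTimeout: &metav1.Duration{Duration: 20 * time.Minute},
		MaxEvictRetries:        pointer.Int32Ptr(10),
		// Node conditions that, if true for the duration of MachineHealthTimeout,
		// cause the machine to be declared failed.
		NodeConditions: []string{"KernelDeadlock", "ReadonlyFilesystem"},
	}
	fmt.Printf("%+v\n", mcmSettings)
}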
@@ -1037,6 +1217,15 @@ message Monitoring { optional Alerting alerting = 1; } +// NamedResourceReference is a named reference to a resource. +message NamedResourceReference { + // Name of the resource reference. + optional string name = 1; + + // ResourceRef is a reference to a resource. + optional k8s.io.api.autoscaling.v1.CrossVersionObjectReference resourceRef = 2; +} + // Networking defines networking parameters for the shoot cluster. message Networking { // Type identifies the type of the networking plugin. @@ -1044,7 +1233,7 @@ message Networking { // ProviderConfig is the configuration passed to network resource. // +optional - optional ProviderConfig providerConfig = 2; + optional k8s.io.apimachinery.pkg.runtime.RawExtension providerConfig = 2; // Pods is the CIDR of the pod network. // +optional @@ -1063,7 +1252,7 @@ message Networking { message NginxIngress { optional Addon addon = 1; - // LoadBalancerSourceRanges is list of whitelist IP sources for NginxIngress + // LoadBalancerSourceRanges is list of allowed IP sources for NginxIngress // +optional repeated string loadBalancerSourceRanges = 2; @@ -1262,6 +1451,10 @@ message ProjectSpec { // A nil value means that Gardener will determine the name of the namespace. // +optional optional string namespace = 6; + + // Tolerations contains the tolerations for taints on seed clusters. + // +optional + optional ProjectTolerations tolerations = 7; } // ProjectStatus holds the most recently observed status of the project. @@ -1272,6 +1465,31 @@ message ProjectStatus { // Phase is the current phase of the project. optional string phase = 2; + + // StaleSinceTimestamp contains the timestamp when the project was first discovered to be stale/unused. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time staleSinceTimestamp = 3; + + // StaleAutoDeleteTimestamp contains the timestamp when the project will be garbage-collected/automatically deleted + // because it's stale/unused. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time staleAutoDeleteTimestamp = 4; +} + +// ProjectTolerations contains the tolerations for taints on seed clusters. +message ProjectTolerations { + // Defaults contains a list of tolerations that are added to the shoots in this project by default. + // +patchMergeKey=key + // +patchStrategy=merge + // +optional + repeated Toleration defaults = 1; + + // Whitelist contains a list of tolerations that are allowed to be added to the shoots in this project. Please note + // that this list may only be added by users having the `spec-tolerations-whitelist` verb for project resources. + // +patchMergeKey=key + // +patchStrategy=merge + // +optional + repeated Toleration whitelist = 2; } // Provider contains provider-specific information that are handed-over to the provider-specific @@ -1283,12 +1501,12 @@ message Provider { // ControlPlaneConfig contains the provider-specific control plane config blob. Please look up the concrete // definition in the documentation of your provider extension. // +optional - optional ProviderConfig controlPlaneConfig = 2; + optional k8s.io.apimachinery.pkg.runtime.RawExtension controlPlaneConfig = 2; // InfrastructureConfig contains the provider-specific infrastructure config blob. Please look up the concrete // definition in the documentation of your provider extension. // +optional - optional ProviderConfig infrastructureConfig = 3; + optional k8s.io.apimachinery.pkg.runtime.RawExtension infrastructureConfig = 3; // Workers is a list of worker groups. 
// +patchMergeKey=name @@ -1296,13 +1514,6 @@ message Provider { repeated Worker workers = 4; } -// ProviderConfig is a workaround for missing OpenAPI functions on runtime.RawExtension struct. -// https://github.com/kubernetes/kubernetes/issues/55890 -// https://github.com/kubernetes-sigs/cluster-api/issues/137 -message ProviderConfig { - optional k8s.io.apimachinery.pkg.runtime.RawExtension rawExtension = 1; -} - message Quota { // Standard object metadata. // +optional @@ -1346,6 +1557,27 @@ message Region { // +patchStrategy=merge // +optional repeated AvailabilityZone zones = 2; + + // Labels is an optional set of key-value pairs that contain certain administrator-controlled labels for this region. + // It can be used by Gardener administrators/operators to provide additional information about a region, e.g. wrt + // quality, reliability, access restrictions, etc. + // +optional + map labels = 3; +} + +// ResourceWatchCacheSize contains configuration of the API server's watch cache size for one specific resource. +message ResourceWatchCacheSize { + // APIGroup is the API group of the resource for which the watch cache size should be configured. + // An unset value is used to specify the legacy core API (e.g. for `secrets`). + // +optional + optional string apiGroup = 1; + + // Resource is the name of the resource for which the watch cache size should be configured + // (in lowercase plural form, e.g. `secrets`). + optional string resource = 2; + + // CacheSize specifies the watch cache size that should be configured for the specified resource. + optional int32 size = 3; } message SecretBinding { @@ -1390,7 +1622,7 @@ message SeedBackup { // ProviderConfig is the configuration passed to BackupBucket resource. // +optional - optional ProviderConfig providerConfig = 2; + optional k8s.io.apimachinery.pkg.runtime.RawExtension providerConfig = 2; // Region is a region name. // +optional @@ -1405,8 +1637,31 @@ message SeedBackup { // SeedDNS contains DNS-relevant information about this seed cluster. message SeedDNS { // IngressDomain is the domain of the Seed cluster pointing to the ingress controller endpoint. It will be used - // to construct ingress URLs for system applications running in Shoot clusters. + // to construct ingress URLs for system applications running in Shoot clusters. Once set this field is immutable. + // This will be removed in the next API version and replaced by spec.ingress.domain. + // +optional optional string ingressDomain = 1; + + // Provider configures a DNSProvider + // +optional + optional SeedDNSProvider provider = 2; +} + +// SeedDNSProvider configures a DNSProvider for Seeds +message SeedDNSProvider { + // Type describes the type of the dns-provider, for example `aws-route53` + optional string type = 1; + + // SecretRef is a reference to a Secret object containing cloud provider credentials used for registering external domains. + optional k8s.io.api.core.v1.SecretReference secretRef = 2; + + // Domains contains information about which domains shall be included/excluded for this provider. + // +optional + optional DNSIncludeExclude domains = 3; + + // Zones contains information about which hosted zones shall be included/excluded for this provider. + // +optional + optional DNSIncludeExclude zones = 4; } // SeedList is a collection of Seeds. @@ -1448,12 +1703,87 @@ message SeedProvider { // ProviderConfig is the configuration passed to Seed resource. 
// +optional - optional ProviderConfig providerConfig = 2; + optional k8s.io.apimachinery.pkg.runtime.RawExtension providerConfig = 2; // Region is a name of a region. optional string region = 3; } +// SeedSelector contains constraints for selecting seed to be usable for shoots using a profile +message SeedSelector { + // LabelSelector is optional and can be used to select seeds by their label settings + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labelSelector = 1; + + // Providers is optional and can be used by restricting seeds by their provider type. '*' can be used to enable seeds regardless of their provider type. + // +optional + repeated string providerTypes = 2; +} + +// SeedSettingExcessCapacityReservation controls the excess capacity reservation for shoot control planes in the +// seed. When enabled then this is done via PodPriority and requires the Seed cluster to have Kubernetes version 1.11 +// or the PodPriority feature gate as well as the scheduling.k8s.io/v1alpha1 API group enabled. +message SeedSettingExcessCapacityReservation { + // Enabled controls whether the excess capacity reservation should be enabled. + optional bool enabled = 1; +} + +// SeedSettingLoadBalancerServices controls certain settings for services of type load balancer that are created in the +// seed. +message SeedSettingLoadBalancerServices { + // Annotations is a map of annotations that will be injected/merged into every load balancer service object. + // +optional + map annotations = 1; +} + +// SeedSettingScheduling controls settings for scheduling decisions for the seed. +message SeedSettingScheduling { + // Visible controls whether the gardener-scheduler shall consider this seed when scheduling shoots. Invisible seeds + // are not considered by the scheduler. + optional bool visible = 1; +} + +// SeedSettingShootDNS controls the shoot DNS settings for the seed. +message SeedSettingShootDNS { + // Enabled controls whether the DNS for shoot clusters should be enabled. When disabled then all shoots using the + // seed won't get any DNS providers, DNS records, and no DNS extension controller is required to be installed here. + // This is useful for environments where DNS is not required. + optional bool enabled = 1; +} + +// SeedSettingVerticalPodAutoscaler controls certain settings for the vertical pod autoscaler components deployed in the +// seed. +message SeedSettingVerticalPodAutoscaler { + // Enabled controls whether the VPA components shall be deployed into the garden namespace in the seed cluster. It + // is enabled by default because Gardener heavily relies on a VPA being deployed. You should only disable this if + // your seed cluster already has another, manually/custom managed VPA deployment. + optional bool enabled = 1; +} + +// SeedSettings contains certain settings for this seed cluster. +message SeedSettings { + // ExcessCapacityReservation controls the excess capacity reservation for shoot control planes in the seed. + // +optional + optional SeedSettingExcessCapacityReservation excessCapacityReservation = 1; + + // Scheduling controls settings for scheduling decisions for the seed. + // +optional + optional SeedSettingScheduling scheduling = 2; + + // ShootDNS controls the shoot DNS settings for the seed. + // +optional + optional SeedSettingShootDNS shootDNS = 3; + + // LoadBalancerServices controls certain settings for services of type load balancer that are created in the + // seed. 
+ // +optional + optional SeedSettingLoadBalancerServices loadBalancerServices = 4; + + // VerticalPodAutoscaler controls certain settings for the vertical pod autoscaler components deployed in the seed. + // +optional + optional SeedSettingVerticalPodAutoscaler verticalPodAutoscaler = 5; +} + // SeedSpec is the specification of a Seed. message SeedSpec { // Backup holds the object store configuration for the backups of shoot (currently only etcd). @@ -1484,6 +1814,14 @@ message SeedSpec { // Volume contains settings for persistentvolumes created in the seed cluster. // +optional optional SeedVolume volume = 7; + + // Settings contains certain settings for this seed cluster. + // +optional + optional SeedSettings settings = 8; + + // Ingress configures Ingress specific settings of the Seed cluster. + // +optional + optional Ingress ingress = 9; } // SeedStatus is the status of a Seed. @@ -1506,6 +1844,19 @@ message SeedStatus { // Seed's generation, which is updated on mutation by the API Server. // +optional optional int64 observedGeneration = 4; + + // ClusterIdentity is the identity of the Seed cluster + // +optional + optional string clusterIdentity = 5; + + // Capacity represents the total resources of a seed. + // +optional + map capacity = 6; + + // Allocatable represents the resources of a seed that are available for scheduling. + // Defaults to Capacity. + // +optional + map allocatable = 7; } // SeedTaint describes a taint on a seed. @@ -1518,6 +1869,17 @@ message SeedTaint { optional string value = 2; } +// SeedTemplate is a template for creating a Seed object. +message SeedTemplate { + // Standard object metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the desired behavior of the Seed. + // +optional + optional SeedSpec spec = 2; +} + // SeedVolume contains settings for persistentvolumes created in the seed cluster. message SeedVolume { // MinimumSize defines the minimum size that should be used for PVCs in the seed. @@ -1587,7 +1949,7 @@ message ShootMachineImage { // ProviderConfig is the shoot's individual configuration passed to an extension resource. // +optional - optional ProviderConfig providerConfig = 2; + optional k8s.io.apimachinery.pkg.runtime.RawExtension providerConfig = 2; // Version is the version of the shoot's image. // If version is not provided, it will be defaulted to the latest version from the CloudProfile. @@ -1659,6 +2021,20 @@ message ShootSpec { // SeedName is the name of the seed cluster that runs the control plane of the Shoot. // +optional optional string seedName = 14; + + // SeedSelector is an optional selector which must match a seed's labels for the shoot to be scheduled on that seed. + // +optional + optional SeedSelector seedSelector = 15; + + // Resources holds a list of named resource references that can be referred to in extension configs by their names. + // +optional + repeated NamedResourceReference resources = 16; + + // Tolerations contains the tolerations for taints on seed clusters. + // +patchMergeKey=key + // +patchStrategy=merge + // +optional + repeated Toleration tolerations = 17; } // ShootStatus holds the most recently observed status of the Shoot cluster. @@ -1711,9 +2087,61 @@ message ShootStatus { // UID is a unique identifier for the Shoot cluster to avoid portability between Kubernetes clusters. // It is used to compute unique hashes. 
optional string uid = 11; + + // ClusterIdentity is the identity of the Shoot cluster + // +optional + optional string clusterIdentity = 12; } -// Volume contains information about the volume type and size. +// Toleration is a toleration for a seed taint. +message Toleration { + // Key is the toleration key to be applied to a project or shoot. + optional string key = 1; + + // Value is the toleration value corresponding to the toleration key. + // +optional + optional string value = 2; +} + +// VerticalPodAutoscaler contains the configuration flags for the Kubernetes vertical pod autoscaler. +message VerticalPodAutoscaler { + // Enabled specifies whether the Kubernetes VPA shall be enabled for the shoot cluster. + optional bool enabled = 1; + + // EvictAfterOOMThreshold defines the threshold that will lead to pod eviction in case it OOMed in less than the given + // threshold since its start and if it has only one container (default: 10m0s). + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration evictAfterOOMThreshold = 2; + + // EvictionRateBurst defines the burst of pods that can be evicted (default: 1) + // +optional + optional int32 evictionRateBurst = 3; + + // EvictionRateLimit defines the number of pods that can be evicted per second. A rate limit set to 0 or -1 will + // disable the rate limiter (default: -1). + // +optional + optional double evictionRateLimit = 4; + + // EvictionTolerance defines the fraction of replica count that can be evicted for update in case more than one + // pod can be evicted (default: 0.5). + // +optional + optional double evictionTolerance = 5; + + // RecommendationMarginFraction is the fraction of usage added as the safety margin to the recommended request + // (default: 0.15). + // +optional + optional double recommendationMarginFraction = 6; + + // UpdaterInterval is the interval how often the updater should run (default: 1m0s). + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration updaterInterval = 7; + + // RecommenderInterval is the interval how often metrics should be fetched (default: 1m0s). + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration recommenderInterval = 8; +} + +// Volume contains information about the volume type, size, and encryption. message Volume { // Name of the volume to make it referencable. // +optional @@ -1744,6 +2172,21 @@ message VolumeType { optional bool usable = 3; } +// WatchCacheSizes contains configuration of the API server's watch cache sizes. +message WatchCacheSizes { + // Default configures the default watch cache size of the kube-apiserver + // (flag `--default-watch-cache-size`, defaults to 100). + // See: https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/ + // +optional + optional int32 default = 1; + + // Resources configures the watch cache size of the kube-apiserver per resource + // (flag `--watch-cache-sizes`). + // See: https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/ + // +optional + repeated ResourceWatchCacheSize resources = 2; +} + // Worker is the base definition of a worker group. message Worker { // Annotations is a map of key/value pairs for annotations for all the `Node` objects in this worker pool. @@ -1788,7 +2231,7 @@ message Worker { // ProviderConfig is the provider-specific configuration for this worker pool. 
// +optional - optional ProviderConfig providerConfig = 12; + optional k8s.io.apimachinery.pkg.runtime.RawExtension providerConfig = 12; // Taints is a list of taints for all the `Node` objects in this worker pool. // +optional @@ -1800,7 +2243,7 @@ message Worker { // DataVolumes contains a list of additional worker volumes. // +optional - repeated Volume dataVolumes = 15; + repeated DataVolume dataVolumes = 15; // KubeletDataVolumeName contains the name of a dataVolume that should be used for storing kubelet state. // +optional @@ -1810,6 +2253,14 @@ message Worker { // as not every provider may support availability zones. // +optional repeated string zones = 17; + + // SystemComponents contains configuration for system components related to this worker pool + // +optional + optional WorkerSystemComponents systemComponents = 18; + + // MachineControllerManagerSettings contains configurations for different worker-pools. Eg. MachineDrainTimeout, MachineHealthTimeout. + // +optional + optional MachineControllerManagerSettings machineControllerManager = 19; } // WorkerKubernetes contains configuration for Kubernetes components related to this worker pool. @@ -1819,3 +2270,9 @@ message WorkerKubernetes { optional KubeletConfig kubelet = 1; } +// WorkerSystemComponents contains configuration for system components related to this worker pool +message WorkerSystemComponents { + // Allow determines whether the pool should be allowed to host system components or not (defaults to true) + optional bool allow = 1; +} + diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/helper/condition_builder.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/helper/condition_builder.go index d8c47fd8b..c539b1830 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/helper/condition_builder.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/helper/condition_builder.go @@ -17,33 +17,36 @@ package helper import ( "fmt" - api "github.com/gardener/gardener/pkg/apis/core/v1beta1" + gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1" + apiequality "k8s.io/apimachinery/pkg/api/equality" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // ConditionBuilder build a Condition. type ConditionBuilder interface { - WithOldCondition(old api.Condition) ConditionBuilder - WithStatus(status api.ConditionStatus) ConditionBuilder + WithOldCondition(old gardencorev1beta1.Condition) ConditionBuilder + WithStatus(status gardencorev1beta1.ConditionStatus) ConditionBuilder WithReason(reason string) ConditionBuilder WithMessage(message string) ConditionBuilder + WithCodes(codes ...gardencorev1beta1.ErrorCode) ConditionBuilder WithNowFunc(now func() metav1.Time) ConditionBuilder - Build() (new api.Condition, updated bool) + Build() (new gardencorev1beta1.Condition, updated bool) } // defaultConditionBuilder build a Condition. type defaultConditionBuilder struct { - old api.Condition - status api.ConditionStatus - conditionType api.ConditionType + old gardencorev1beta1.Condition + status gardencorev1beta1.ConditionStatus + conditionType gardencorev1beta1.ConditionType reason string message string + codes []gardencorev1beta1.ErrorCode nowFunc func() metav1.Time } // NewConditionBuilder returns a ConditionBuilder for a specific condition. 
-func NewConditionBuilder(conditionType api.ConditionType) (ConditionBuilder, error) { +func NewConditionBuilder(conditionType gardencorev1beta1.ConditionType) (ConditionBuilder, error) { if conditionType == "" { return nil, fmt.Errorf("conditionType cannot be empty") } @@ -56,7 +59,7 @@ func NewConditionBuilder(conditionType api.ConditionType) (ConditionBuilder, err // WithOldCondition sets the old condition. It can be used to prodive default values. // The old's condition type is overridden to the one specified in the builder. -func (b *defaultConditionBuilder) WithOldCondition(old api.Condition) ConditionBuilder { +func (b *defaultConditionBuilder) WithOldCondition(old gardencorev1beta1.Condition) ConditionBuilder { old.Type = b.conditionType b.old = old @@ -64,7 +67,7 @@ func (b *defaultConditionBuilder) WithOldCondition(old api.Condition) ConditionB } // WithStatus sets the status of the condition. -func (b *defaultConditionBuilder) WithStatus(status api.ConditionStatus) ConditionBuilder { +func (b *defaultConditionBuilder) WithStatus(status gardencorev1beta1.ConditionStatus) ConditionBuilder { b.status = status return b } @@ -81,6 +84,12 @@ func (b *defaultConditionBuilder) WithMessage(message string) ConditionBuilder { return b } +// WithCodes sets the codes of the condition. +func (b *defaultConditionBuilder) WithCodes(codes ...gardencorev1beta1.ErrorCode) ConditionBuilder { + b.codes = codes + return b +} + // WithNowFunc sets the function used for getting the current time. // Should only be used for tests. func (b *defaultConditionBuilder) WithNowFunc(now func() metav1.Time) ConditionBuilder { @@ -92,7 +101,7 @@ func (b *defaultConditionBuilder) WithNowFunc(now func() metav1.Time) ConditionB // If OldCondition is provided: // - Any changes to status set the `LastTransitionTime` // - Any updates to the message or the reason cause set `LastUpdateTime` to the current time. -func (b *defaultConditionBuilder) Build() (new api.Condition, updated bool) { +func (b *defaultConditionBuilder) Build() (new gardencorev1beta1.Condition, updated bool) { var ( now = b.nowFunc() emptyTime = metav1.Time{} @@ -113,7 +122,7 @@ func (b *defaultConditionBuilder) Build() (new api.Condition, updated bool) { if b.status != "" { new.Status = b.status } else if b.status == "" && b.old.Status == "" { - new.Status = api.ConditionUnknown + new.Status = gardencorev1beta1.ConditionUnknown } if b.reason != "" { @@ -128,6 +137,12 @@ func (b *defaultConditionBuilder) Build() (new api.Condition, updated bool) { new.Message = "The condition has been initialized but its semantic check has not been performed yet." } + if b.codes != nil { + new.Codes = b.codes + } else if b.codes == nil && b.old.Codes == nil { + new.Codes = nil + } + if new.Status != b.old.Status { new.LastTransitionTime = now } diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/helper/errors.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/helper/errors.go index 201ead201..904497450 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/helper/errors.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/helper/errors.go @@ -25,31 +25,37 @@ import ( errors2 "github.com/pkg/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" ) -type errorWithCode struct { - code gardencorev1beta1.ErrorCode +// ErrorWithCodes contains error codes and an error message. 
+type ErrorWithCodes struct { message string + codes []gardencorev1beta1.ErrorCode } -// NewErrorWithCode creates a new error that additionally exposes the given code via the Coder interface. -func NewErrorWithCode(code gardencorev1beta1.ErrorCode, message string) error { - return &errorWithCode{code, message} +// NewErrorWithCodes creates a new error that additionally exposes the given codes via the Coder interface. +func NewErrorWithCodes(message string, codes ...gardencorev1beta1.ErrorCode) error { + return &ErrorWithCodes{message, codes} } -func (e *errorWithCode) Code() gardencorev1beta1.ErrorCode { - return e.code +// Codes returns all error codes. +func (e *ErrorWithCodes) Codes() []gardencorev1beta1.ErrorCode { + return e.codes } -func (e *errorWithCode) Error() string { +// Error returns the error message. +func (e *ErrorWithCodes) Error() string { return e.message } var ( - unauthorizedRegexp = regexp.MustCompile(`(?i)(Unauthorized|InvalidClientTokenId|SignatureDoesNotMatch|Authentication failed|AuthFailure|AuthorizationFailed|invalid character|invalid_grant|invalid_client|Authorization Profile was not found|cannot fetch token|no active subscriptions|InvalidAccessKeyId|InvalidSecretAccessKey)`) - quotaExceededRegexp = regexp.MustCompile(`(?i)(LimitExceeded|Quota)`) - insufficientPrivilegesRegexp = regexp.MustCompile(`(?i)(AccessDenied|Forbidden|deny|denied)`) - dependenciesRegexp = regexp.MustCompile(`(?i)(PendingVerification|Access Not Configured|accessNotConfigured|DependencyViolation|OptInRequired|DeleteConflict|Conflict|inactive billing state|ReadOnlyDisabledSubscription|is already being used|InUseSubnetCannotBeDeleted|not available in the current hardware cluster)`) + unauthorizedRegexp = regexp.MustCompile(`(?i)(Unauthorized|InvalidClientTokenId|InvalidAuthenticationTokenTenant|SignatureDoesNotMatch|Authentication failed|AuthFailure|AuthorizationFailed|invalid character|invalid_grant|invalid_client|Authorization Profile was not found|cannot fetch token|no active subscriptions|InvalidAccessKeyId|InvalidSecretAccessKey|query returned no results|UnauthorizedOperation|not authorized)`) + quotaExceededRegexp = regexp.MustCompile(`(?i)(LimitExceeded|Quota|Throttling|Too many requests)`) + insufficientPrivilegesRegexp = regexp.MustCompile(`(?i)(AccessDenied|OperationNotAllowed|Error 403)`) + dependenciesRegexp = regexp.MustCompile(`(?i)(PendingVerification|Access Not Configured|accessNotConfigured|DependencyViolation|OptInRequired|DeleteConflict|Conflict|inactive billing state|ReadOnlyDisabledSubscription|is already being used|InUseSubnetCannotBeDeleted|VnetInUse|InUseRouteTableCannotBeDeleted|timeout while waiting for state to become|InvalidCidrBlock|already busy for|InsufficientFreeAddressesInSubnet|InternalServerError|RetryableError|Future#WaitForCompletion: context has been cancelled|internalerror|internal server error|A resource with the ID|VnetAddressSpaceCannotChangeDueToPeerings)`) + resourcesDepletedRegexp = regexp.MustCompile(`(?i)(not available in the current hardware cluster|InsufficientInstanceCapacity|SkuNotAvailable|ZonalAllocationFailed|out of stock)`) + configurationProblemRegexp = regexp.MustCompile(`(?i)(AzureBastionSubnet|not supported in your requested Availability Zone|InvalidParameter|InvalidParameterValue|notFound|NetcfgInvalidSubnet|InvalidSubnet|Invalid value|KubeletHasInsufficientMemory|KubeletHasDiskPressure|KubeletHasInsufficientPID|violates constraint|no attached internet gateway found|Your query returned no 
results|PrivateEndpointNetworkPoliciesCannotBeEnabledOnPrivateEndpointSubnet|invalid VPC attributes|PrivateLinkServiceNetworkPoliciesCannotBeEnabledOnPrivateLinkServiceSubnet|unrecognized feature gate|runtime-config invalid key|LoadBalancingRuleMustDisableSNATSinceSameFrontendIPConfigurationIsReferencedByOutboundRule)`) ) // DetermineError determines the Garden error code for the given error and creates a new error with the given message. @@ -63,43 +69,60 @@ func DetermineError(err error, message string) error { errMsg = err.Error() } - code := determineErrorCode(err) - if code == "" { + codes := DetermineErrorCodes(err) + if codes == nil { return errors.New(errMsg) } - return &errorWithCode{code, errMsg} + return &ErrorWithCodes{errMsg, codes} } -func determineErrorCode(err error) gardencorev1beta1.ErrorCode { - var coder Coder +// DetermineErrorCodes determines error codes based on the given error. +func DetermineErrorCodes(err error) []gardencorev1beta1.ErrorCode { + var ( + coder Coder + message = err.Error() + codes = sets.NewString() + ) - // first try to re-use code from error + // try to re-use codes from error if errors.As(err, &coder) { - switch coder.Code() { - case gardencorev1beta1.ErrorInfraUnauthorized, gardencorev1beta1.ErrorInfraQuotaExceeded, gardencorev1beta1.ErrorInfraInsufficientPrivileges, gardencorev1beta1.ErrorInfraDependencies: - return coder.Code() + for _, code := range coder.Codes() { + codes.Insert(string(code)) } } - message := err.Error() - switch { - case unauthorizedRegexp.MatchString(message): - return gardencorev1beta1.ErrorInfraUnauthorized - case quotaExceededRegexp.MatchString(message): - return gardencorev1beta1.ErrorInfraQuotaExceeded - case insufficientPrivilegesRegexp.MatchString(message): - return gardencorev1beta1.ErrorInfraInsufficientPrivileges - case dependenciesRegexp.MatchString(message): - return gardencorev1beta1.ErrorInfraDependencies - default: - return "" + // determine error codes + if unauthorizedRegexp.MatchString(message) { + codes.Insert(string(gardencorev1beta1.ErrorInfraUnauthorized)) } + if quotaExceededRegexp.MatchString(message) { + codes.Insert(string(gardencorev1beta1.ErrorInfraQuotaExceeded)) + } + if insufficientPrivilegesRegexp.MatchString(message) { + codes.Insert(string(gardencorev1beta1.ErrorInfraInsufficientPrivileges)) + } + if dependenciesRegexp.MatchString(message) { + codes.Insert(string(gardencorev1beta1.ErrorInfraDependencies)) + } + if resourcesDepletedRegexp.MatchString(message) { + codes.Insert(string(gardencorev1beta1.ErrorInfraResourcesDepleted)) + } + if configurationProblemRegexp.MatchString(message) { + codes.Insert(string(gardencorev1beta1.ErrorConfigurationProblem)) + } + + // compute error code list based on code string set + var out []gardencorev1beta1.ErrorCode + for _, c := range codes.List() { + out = append(out, gardencorev1beta1.ErrorCode(c)) + } + return out } -// Coder is an error that may produce an ErrorCode visible to the outside. +// Coder is an error that may produce a ErrorCodes visible to the outside. type Coder interface { error - Code() gardencorev1beta1.ErrorCode + Codes() []gardencorev1beta1.ErrorCode } // ExtractErrorCodes extracts all error codes from the given error by using utilerrors.Errors @@ -108,7 +131,7 @@ func ExtractErrorCodes(err error) []gardencorev1beta1.ErrorCode { for _, err := range utilerrors.Errors(err) { var coder Coder if errors.As(err, &coder) { - codes = append(codes, coder.Code()) + codes = append(codes, coder.Codes()...) 
} } return codes @@ -168,3 +191,16 @@ func LastErrorWithTaskID(description string, taskID string, codes ...gardencorev }, } } + +// HasNonRetryableErrorCode returns true if at least one of given list of last errors has at least one error code that +// indicates that an automatic retry would not help fixing the problem. +func HasNonRetryableErrorCode(lastErrors ...gardencorev1beta1.LastError) bool { + for _, lastError := range lastErrors { + for _, code := range lastError.Codes { + if code == gardencorev1beta1.ErrorInfraUnauthorized || code == gardencorev1beta1.ErrorConfigurationProblem { + return true + } + } + } + return false +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/helper/helper.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/helper/helper.go index 833df410e..7849607fc 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/helper/helper.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/helper/helper.go @@ -16,17 +16,23 @@ package helper import ( "fmt" - "sort" "strconv" "strings" + "time" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/json" + "k8s.io/utils/pointer" gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1" v1beta1constants "github.com/gardener/gardener/pkg/apis/core/v1beta1/constants" - "github.com/gardener/gardener/pkg/logger" versionutils "github.com/gardener/gardener/pkg/utils/version" "github.com/Masterminds/semver" - errors "github.com/pkg/errors" + "github.com/pkg/errors" apiequality "k8s.io/apimachinery/pkg/api/equality" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/validation/field" @@ -37,33 +43,17 @@ var Now = metav1.Now // InitCondition initializes a new Condition with an Unknown status. func InitCondition(conditionType gardencorev1beta1.ConditionType) gardencorev1beta1.Condition { + now := Now() return gardencorev1beta1.Condition{ Type: conditionType, Status: gardencorev1beta1.ConditionUnknown, Reason: "ConditionInitialized", Message: "The condition has been initialized but its semantic check has not been performed yet.", - LastTransitionTime: Now(), + LastTransitionTime: now, + LastUpdateTime: now, } } -// NewConditions initializes the provided conditions based on an existing list. If a condition type does not exist -// in the list yet, it will be set to default values. -func NewConditions(conditions []gardencorev1beta1.Condition, conditionTypes ...gardencorev1beta1.ConditionType) []*gardencorev1beta1.Condition { - newConditions := []*gardencorev1beta1.Condition{} - - // We retrieve the current conditions in order to update them appropriately. - for _, conditionType := range conditionTypes { - if c := GetCondition(conditions, conditionType); c != nil { - newConditions = append(newConditions, c) - continue - } - initializedCondition := InitCondition(conditionType) - newConditions = append(newConditions, &initializedCondition) - } - - return newConditions -} - // GetCondition returns the condition with the given out of the list of . // In case the required type could not be found, it returns nil. 
func GetCondition(conditions []gardencorev1beta1.Condition, conditionType gardencorev1beta1.ConditionType) *gardencorev1beta1.Condition { @@ -86,28 +76,39 @@ func GetOrInitCondition(conditions []gardencorev1beta1.Condition, conditionType } // UpdatedCondition updates the properties of one specific condition. -func UpdatedCondition(condition gardencorev1beta1.Condition, status gardencorev1beta1.ConditionStatus, reason, message string) gardencorev1beta1.Condition { - newCondition := gardencorev1beta1.Condition{ - Type: condition.Type, - Status: status, - Reason: reason, - Message: message, - LastTransitionTime: condition.LastTransitionTime, - LastUpdateTime: Now(), - } +func UpdatedCondition(condition gardencorev1beta1.Condition, status gardencorev1beta1.ConditionStatus, reason, message string, codes ...gardencorev1beta1.ErrorCode) gardencorev1beta1.Condition { + var ( + newCondition = gardencorev1beta1.Condition{ + Type: condition.Type, + Status: status, + Reason: reason, + Message: message, + LastTransitionTime: condition.LastTransitionTime, + LastUpdateTime: condition.LastUpdateTime, + Codes: codes, + } + now = Now() + ) if condition.Status != status { - newCondition.LastTransitionTime = Now() + newCondition.LastTransitionTime = now + } + + if condition.Reason != reason || condition.Message != message || !apiequality.Semantic.DeepEqual(condition.Codes, codes) { + newCondition.LastUpdateTime = now } + return newCondition } -func UpdatedConditionUnknownError(condition gardencorev1beta1.Condition, err error) gardencorev1beta1.Condition { - return UpdatedConditionUnknownErrorMessage(condition, err.Error()) +// UpdatedConditionUnknownError updates the condition to 'Unknown' status and the message of the given error. +func UpdatedConditionUnknownError(condition gardencorev1beta1.Condition, err error, codes ...gardencorev1beta1.ErrorCode) gardencorev1beta1.Condition { + return UpdatedConditionUnknownErrorMessage(condition, err.Error(), codes...) } -func UpdatedConditionUnknownErrorMessage(condition gardencorev1beta1.Condition, message string) gardencorev1beta1.Condition { - return UpdatedCondition(condition, gardencorev1beta1.ConditionUnknown, gardencorev1beta1.ConditionCheckError, message) +// UpdatedConditionUnknownErrorMessage updates the condition with 'Unknown' status and the given message. +func UpdatedConditionUnknownErrorMessage(condition gardencorev1beta1.Condition, message string, codes ...gardencorev1beta1.ErrorCode) gardencorev1beta1.Condition { + return UpdatedCondition(condition, gardencorev1beta1.ConditionUnknown, gardencorev1beta1.ConditionCheckError, message, codes...) } // MergeConditions merges the given with the . Existing conditions are superseded by @@ -170,7 +171,17 @@ func IsControllerInstallationSuccessful(controllerInstallation gardencorev1beta1 return installed && healthy } -// ComputeOperationType checksthe and determines whether is it is Create operation or reconcile operation +// IsControllerInstallationRequired returns true if a ControllerInstallation has been marked as "required". 
+func IsControllerInstallationRequired(controllerInstallation gardencorev1beta1.ControllerInstallation) bool { + for _, condition := range controllerInstallation.Status.Conditions { + if condition.Type == gardencorev1beta1.ControllerInstallationRequired && condition.Status == gardencorev1beta1.ConditionTrue { + return true + } + } + return false +} + +// ComputeOperationType checks the and determines whether it is Create, Delete, Reconcile, Migrate or Restore operation func ComputeOperationType(meta metav1.ObjectMeta, lastOperation *gardencorev1beta1.LastOperation) gardencorev1beta1.LastOperationType { switch { case meta.Annotations[v1beta1constants.GardenerOperation] == v1beta1constants.GardenerOperationMigrate: @@ -179,10 +190,12 @@ func ComputeOperationType(meta metav1.ObjectMeta, lastOperation *gardencorev1bet return gardencorev1beta1.LastOperationTypeDelete case lastOperation == nil: return gardencorev1beta1.LastOperationTypeCreate - case (lastOperation.Type == gardencorev1beta1.LastOperationTypeCreate && lastOperation.State != gardencorev1beta1.LastOperationStateSucceeded): + case lastOperation.Type == gardencorev1beta1.LastOperationTypeCreate && lastOperation.State != gardencorev1beta1.LastOperationStateSucceeded: return gardencorev1beta1.LastOperationTypeCreate - case (lastOperation.Type == gardencorev1beta1.LastOperationTypeMigrate && lastOperation.State != gardencorev1beta1.LastOperationStateSucceeded): + case lastOperation.Type == gardencorev1beta1.LastOperationTypeMigrate && lastOperation.State != gardencorev1beta1.LastOperationStateSucceeded: return gardencorev1beta1.LastOperationTypeMigrate + case lastOperation.Type == gardencorev1beta1.LastOperationTypeRestore && lastOperation.State != gardencorev1beta1.LastOperationStateSucceeded: + return gardencorev1beta1.LastOperationTypeRestore } return gardencorev1beta1.LastOperationTypeReconcile } @@ -197,19 +210,55 @@ func TaintsHave(taints []gardencorev1beta1.SeedTaint, key string) bool { return false } +// TaintsAreTolerated returns true when all the given taints are tolerated by the given tolerations. 
+func TaintsAreTolerated(taints []gardencorev1beta1.SeedTaint, tolerations []gardencorev1beta1.Toleration) bool { + if len(taints) == 0 { + return true + } + if len(taints) > len(tolerations) { + return false + } + + tolerationKeyValues := make(map[string]string, len(tolerations)) + for _, toleration := range tolerations { + v := "" + if toleration.Value != nil { + v = *toleration.Value + } + tolerationKeyValues[toleration.Key] = v + } + + for _, taint := range taints { + tolerationValue, ok := tolerationKeyValues[taint.Key] + if !ok { + return false + } + if taint.Value != nil && *taint.Value != tolerationValue { + return false + } + } + + return true +} + type ShootedSeed struct { - DisableDNS *bool - DisableCapacityReservation *bool - Protected *bool - Visible *bool - MinimumVolumeSize *string - APIServer *ShootedSeedAPIServer - BlockCIDRs []string - ShootDefaults *gardencorev1beta1.ShootNetworks - Backup *gardencorev1beta1.SeedBackup - NoGardenlet bool - UseServiceAccountBootstrapping bool - WithSecretRef bool + DisableDNS *bool + DisableCapacityReservation *bool + Protected *bool + Visible *bool + LoadBalancerServicesAnnotations map[string]string + MinimumVolumeSize *string + APIServer *ShootedSeedAPIServer + BlockCIDRs []string + ShootDefaults *gardencorev1beta1.ShootNetworks + Backup *gardencorev1beta1.SeedBackup + SeedProviderConfig *runtime.RawExtension + IngressController *gardencorev1beta1.IngressController + NoGardenlet bool + UseServiceAccountBootstrapping bool + WithSecretRef bool + FeatureGates map[string]bool + Resources *ShootedSeedResources } type ShootedSeedAPIServer struct { @@ -222,6 +271,11 @@ type ShootedSeedAPIServerAutoscaler struct { MaxReplicas int32 } +type ShootedSeedResources struct { + Capacity corev1.ResourceList + Reserved corev1.ResourceList +} + func parseInt32(s string) (int32, error) { i64, err := strconv.ParseInt(s, 10, 32) if err != nil { @@ -234,11 +288,6 @@ func parseShootedSeed(annotation string) (*ShootedSeed, error) { var ( flags = make(map[string]struct{}) settings = make(map[string]string) - - trueVar = true - falseVar = false - - shootedSeed ShootedSeed ) for _, fragment := range strings.Split(annotation, ",") { @@ -255,6 +304,11 @@ func parseShootedSeed(annotation string) (*ShootedSeed, error) { return nil, nil } + shootedSeed := ShootedSeed{ + LoadBalancerServicesAnnotations: parseShootedSeedLoadBalancerServicesAnnotations(settings), + FeatureGates: parseShootedSeedFeatureGates(settings), + } + apiServer, err := parseShootedSeedAPIServer(settings) if err != nil { return nil, err @@ -279,15 +333,32 @@ func parseShootedSeed(annotation string) (*ShootedSeed, error) { } shootedSeed.Backup = backup + seedProviderConfig, err := parseProviderConfig("providerConfig.", settings) + if err != nil { + return nil, err + } + shootedSeed.SeedProviderConfig = seedProviderConfig + + resources, err := parseShootedSeedResources(settings) + if err != nil { + return nil, err + } + shootedSeed.Resources = resources + + ingressController, err := parseIngressController(settings) + if err != nil { + return nil, err + } + shootedSeed.IngressController = ingressController + if size, ok := settings["minimumVolumeSize"]; ok { shootedSeed.MinimumVolumeSize = &size } - if _, ok := flags["disable-dns"]; ok { - shootedSeed.DisableDNS = &trueVar + shootedSeed.DisableDNS = pointer.BoolPtr(true) } if _, ok := flags["disable-capacity-reservation"]; ok { - shootedSeed.DisableCapacityReservation = &trueVar + shootedSeed.DisableCapacityReservation = pointer.BoolPtr(true) } if _, 
ok := flags["no-gardenlet"]; ok { shootedSeed.NoGardenlet = true @@ -298,18 +369,17 @@ func parseShootedSeed(annotation string) (*ShootedSeed, error) { if _, ok := flags["with-secret-ref"]; ok { shootedSeed.WithSecretRef = true } - if _, ok := flags["protected"]; ok { - shootedSeed.Protected = &trueVar + shootedSeed.Protected = pointer.BoolPtr(true) } if _, ok := flags["unprotected"]; ok { - shootedSeed.Protected = &falseVar + shootedSeed.Protected = pointer.BoolPtr(false) } if _, ok := flags["visible"]; ok { - shootedSeed.Visible = &trueVar + shootedSeed.Visible = pointer.BoolPtr(true) } if _, ok := flags["invisible"]; ok { - shootedSeed.Visible = &falseVar + shootedSeed.Visible = pointer.BoolPtr(false) } return &shootedSeed, nil @@ -347,6 +417,24 @@ func parseShootedSeedShootDefaults(settings map[string]string) (*gardencorev1bet return shootNetworks, nil } +func parseIngressController(settings map[string]string) (*gardencorev1beta1.IngressController, error) { + ingressController := &gardencorev1beta1.IngressController{} + + kind, ok := settings["ingress.controller.kind"] + if !ok { + return nil, nil + } + ingressController.Kind = kind + + parsedProviderConfig, err := parseProviderConfig("ingress.controller.providerConfig.", settings) + if err != nil { + return nil, fmt.Errorf("parsing Ingress providerConfig failed: %s", err.Error()) + } + ingressController.ProviderConfig = parsedProviderConfig + + return ingressController, nil +} + func parseShootedSeedBackup(settings map[string]string) (*gardencorev1beta1.SeedBackup, error) { var ( provider, ok1 = settings["backup.provider"] @@ -377,6 +465,83 @@ func parseShootedSeedBackup(settings map[string]string) (*gardencorev1beta1.Seed return backup, nil } +func parseShootedSeedFeatureGates(settings map[string]string) map[string]bool { + featureGates := make(map[string]bool) + + for k, v := range settings { + if strings.HasPrefix(k, "featureGates.") { + val, _ := strconv.ParseBool(v) + featureGates[strings.Split(k, ".")[1]] = val + } + } + + if len(featureGates) == 0 { + return nil + } + + return featureGates +} + +func parseProviderConfig(prefix string, settings map[string]string) (*runtime.RawExtension, error) { + // reconstruct providerConfig structure + providerConfig := map[string]interface{}{} + + var err error + for k, v := range settings { + if strings.HasPrefix(k, prefix) { + var value interface{} + if strings.HasPrefix(v, `"`) && strings.HasSuffix(v, `"`) { + value, err = strconv.Unquote(v) + if err != nil { + return nil, err + } + } else if b, err := strconv.ParseBool(v); err == nil { + value = b + } else if f, err := strconv.ParseFloat(v, 64); err == nil { + value = f + } else { + value = v + } + + path := strings.TrimPrefix(k, prefix) + if err := unstructured.SetNestedField(providerConfig, value, strings.Split(path, ".")...); err != nil { + return nil, err + } + } + } + + if len(providerConfig) == 0 { + return nil, nil + } + + jsonStr, err := json.Marshal(providerConfig) + if err != nil { + return nil, err + } + + return &runtime.RawExtension{ + Raw: jsonStr, + }, nil +} + +func parseShootedSeedLoadBalancerServicesAnnotations(settings map[string]string) map[string]string { + const optionPrefix = "loadBalancerServices.annotations." 
+ + annotations := make(map[string]string) + for k, v := range settings { + if strings.HasPrefix(k, optionPrefix) { + annotationKey := strings.TrimPrefix(k, optionPrefix) + annotations[annotationKey] = v + } + } + + if len(annotations) == 0 { + return nil + } + + return annotations +} + func parseShootedSeedAPIServer(settings map[string]string) (*ShootedSeedAPIServer, error) { apiServerAutoscaler, err := parseShootedSeedAPIServerAutoscaler(settings) if err != nil { @@ -433,11 +598,50 @@ func parseShootedSeedAPIServerAutoscaler(settings map[string]string) (*ShootedSe return &apiServerAutoscaler, nil } +func parseShootedSeedResources(settings map[string]string) (*ShootedSeedResources, error) { + var capacity, reserved corev1.ResourceList + + for k, v := range settings { + var resourceName corev1.ResourceName + var quantity resource.Quantity + var err error + if strings.HasPrefix(k, "resources.capacity.") || strings.HasPrefix(k, "resources.reserved.") { + resourceName = corev1.ResourceName(strings.Split(k, ".")[2]) + quantity, err = resource.ParseQuantity(v) + if err != nil { + return nil, err + } + if strings.HasPrefix(k, "resources.capacity.") { + if capacity == nil { + capacity = make(corev1.ResourceList) + } + capacity[resourceName] = quantity + } else { + if reserved == nil { + reserved = make(corev1.ResourceList) + } + reserved[resourceName] = quantity + } + } + } + + if len(capacity) == 0 && len(reserved) == 0 { + return nil, nil + } + return &ShootedSeedResources{ + Capacity: capacity, + Reserved: reserved, + }, nil +} + func validateShootedSeed(shootedSeed *ShootedSeed, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if shootedSeed.APIServer != nil { - allErrs = validateShootedSeedAPIServer(shootedSeed.APIServer, fldPath.Child("apiServer")) + allErrs = append(allErrs, validateShootedSeedAPIServer(shootedSeed.APIServer, fldPath.Child("apiServer"))...) + } + if shootedSeed.Resources != nil { + allErrs = append(allErrs, validateShootedSeedResources(shootedSeed.Resources, fldPath.Child("resources"))...) 
} return allErrs @@ -472,11 +676,32 @@ func validateShootedSeedAPIServerAutoscaler(autoscaler *ShootedSeedAPIServerAuto return allErrs } +func validateShootedSeedResources(resources *ShootedSeedResources, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + for resourceName, quantity := range resources.Capacity { + if reservedQuantity, ok := resources.Reserved[resourceName]; ok && reservedQuantity.Value() > quantity.Value() { + allErrs = append(allErrs, field.Invalid(fldPath.Child("reserved", string(resourceName)), resources.Reserved[resourceName], "must be lower or equal to capacity")) + } + } + for resourceName := range resources.Reserved { + if _, ok := resources.Capacity[resourceName]; !ok { + allErrs = append(allErrs, field.Invalid(fldPath.Child("reserved", string(resourceName)), resources.Reserved[resourceName], "reserved without capacity")) + } + } + + return allErrs +} + func setDefaults_ShootedSeed(shootedSeed *ShootedSeed) { if shootedSeed.APIServer == nil { shootedSeed.APIServer = &ShootedSeedAPIServer{} } setDefaults_ShootedSeedAPIServer(shootedSeed.APIServer) + if shootedSeed.Resources == nil { + shootedSeed.Resources = &ShootedSeedResources{} + } + setDefaults_ShootedSeedResources(shootedSeed.Resources) } func setDefaults_ShootedSeedAPIServer(apiServer *ShootedSeedAPIServer) { @@ -506,13 +731,22 @@ func setDefaults_ShootedSeedAPIServerAutoscaler(autoscaler *ShootedSeedAPIServer } } +func setDefaults_ShootedSeedResources(resources *ShootedSeedResources) { + if _, ok := resources.Capacity[gardencorev1beta1.ResourceShoots]; !ok { + if resources.Capacity == nil { + resources.Capacity = make(corev1.ResourceList) + } + resources.Capacity[gardencorev1beta1.ResourceShoots] = resource.MustParse("250") + } +} + // ReadShootedSeed determines whether the Shoot has been marked to be registered automatically as a Seed cluster. func ReadShootedSeed(shoot *gardencorev1beta1.Shoot) (*ShootedSeed, error) { if shoot.Namespace != v1beta1constants.GardenNamespace || shoot.Annotations == nil { return nil, nil } - val, ok := v1beta1constants.GetShootUseAsSeedAnnotation(shoot.Annotations) + val, ok := shoot.Annotations[v1beta1constants.AnnotationShootUseAsSeed] if !ok { return nil, nil } @@ -552,15 +786,25 @@ func ShootWantsClusterAutoscaler(shoot *gardencorev1beta1.Shoot) (bool, error) { return false, nil } +// ShootWantsVerticalPodAutoscaler checks if the given Shoot needs a VPA. +func ShootWantsVerticalPodAutoscaler(shoot *gardencorev1beta1.Shoot) bool { + return shoot.Spec.Kubernetes.VerticalPodAutoscaler != nil && shoot.Spec.Kubernetes.VerticalPodAutoscaler.Enabled +} + // ShootIgnoresAlerts checks if the alerts for the annotated shoot cluster should be ignored. func ShootIgnoresAlerts(shoot *gardencorev1beta1.Shoot) bool { ignore := false - if value, ok := v1beta1constants.GetShootIgnoreAlertsAnnotation(shoot.Annotations); ok { + if value, ok := shoot.Annotations[v1beta1constants.AnnotationShootIgnoreAlerts]; ok { ignore, _ = strconv.ParseBool(value) } return ignore } +// ShootWantsAlertManager checks if the given shoot specification requires an alert manager. +func ShootWantsAlertManager(shoot *gardencorev1beta1.Shoot) bool { + return !ShootIgnoresAlerts(shoot) && shoot.Spec.Monitoring != nil && shoot.Spec.Monitoring.Alerting != nil && len(shoot.Spec.Monitoring.Alerting.EmailReceivers) > 0 +} + // ShootWantsBasicAuthentication returns true if basic authentication is not configured or // if it is set explicitly to 'true'. 
func ShootWantsBasicAuthentication(shoot *gardencorev1beta1.Shoot) bool { @@ -579,17 +823,6 @@ func ShootUsesUnmanagedDNS(shoot *gardencorev1beta1.Shoot) bool { return shoot.Spec.DNS != nil && len(shoot.Spec.DNS.Providers) > 0 && shoot.Spec.DNS.Providers[0].Type != nil && *shoot.Spec.DNS.Providers[0].Type == "unmanaged" } -// GetMachineImagesFor returns a list of all machine images for a given shoot. -func GetMachineImagesFor(shoot *gardencorev1beta1.Shoot) []*gardencorev1beta1.ShootMachineImage { - var workerMachineImages []*gardencorev1beta1.ShootMachineImage - for _, worker := range shoot.Spec.Provider.Workers { - if worker.Machine.Image != nil { - workerMachineImages = append(workerMachineImages, worker.Machine.Image) - } - } - return workerMachineImages -} - // DetermineMachineImageForName finds the cloud specific machine images in the for the given and // region. In case it does not find the machine image with the , it returns false. Otherwise, true and the // cloud-specific machine image will be returned. @@ -617,33 +850,31 @@ func ShootMachineImageVersionExists(constraint gardencorev1beta1.MachineImage, i return false, 0 } -// DetermineLatestMachineImageVersion determines the latest MachineImageVersion from a MachineImage -func DetermineLatestMachineImageVersion(image gardencorev1beta1.MachineImage) (*semver.Version, gardencorev1beta1.ExpirableVersion, error) { - var ( - latestSemVerVersion *semver.Version - latestMachineImageVersion gardencorev1beta1.ExpirableVersion - ) - - for _, imageVersion := range image.Versions { - v, err := semver.NewVersion(imageVersion.Version) - if err != nil { - return nil, gardencorev1beta1.ExpirableVersion{}, fmt.Errorf("error while parsing machine image version '%s' of machine image '%s': version not valid: %s", imageVersion.Version, image.Name, err.Error()) - } - if latestSemVerVersion == nil || v.GreaterThan(latestSemVerVersion) { - latestSemVerVersion = v - latestMachineImageVersion = imageVersion - } +func toExpirableVersions(versions []gardencorev1beta1.MachineImageVersion) []gardencorev1beta1.ExpirableVersion { + expVersions := []gardencorev1beta1.ExpirableVersion{} + for _, version := range versions { + expVersions = append(expVersions, version.ExpirableVersion) } - return latestSemVerVersion, latestMachineImageVersion, nil + return expVersions } -// GetShootMachineImageFromLatestMachineImageVersion determines the latest version in a machine image and returns that as a ShootMachineImage -func GetShootMachineImageFromLatestMachineImageVersion(image gardencorev1beta1.MachineImage) (*semver.Version, gardencorev1beta1.ShootMachineImage, error) { - latestSemVerVersion, latestImage, err := DetermineLatestMachineImageVersion(image) +// GetLatestQualifyingShootMachineImage determines the latest qualifying version in a machine image and returns that as a ShootMachineImage +// A version qualifies if its classification is not preview and the version is not expired. +func GetLatestQualifyingShootMachineImage(image gardencorev1beta1.MachineImage, predicates ...VersionPredicate) (bool, *gardencorev1beta1.ShootMachineImage, error) { + predicates = append(predicates, FilterExpiredVersion()) + qualifyingVersionFound, latestImageVersion, err := GetLatestQualifyingVersion(toExpirableVersions(image.Versions), predicates...) 
if err != nil { - return nil, gardencorev1beta1.ShootMachineImage{}, err + return false, nil, err + } + if !qualifyingVersionFound { + return false, nil, nil } - return latestSemVerVersion, gardencorev1beta1.ShootMachineImage{Name: image.Name, Version: &latestImage.Version}, nil + return true, &gardencorev1beta1.ShootMachineImage{Name: image.Name, Version: &latestImageVersion.Version}, nil +} + +// SystemComponentsAllowed checks if the given worker allows system components to be scheduled onto it +func SystemComponentsAllowed(worker *gardencorev1beta1.Worker) bool { + return worker.SystemComponents == nil || worker.SystemComponents.Allow } // UpdateMachineImages updates the machine images in place. @@ -651,7 +882,6 @@ func UpdateMachineImages(workers []gardencorev1beta1.Worker, machineImages []*ga for _, machineImage := range machineImages { for idx, worker := range workers { if worker.Machine.Image != nil && machineImage.Name == worker.Machine.Image.Name { - logger.Logger.Infof("Updating worker images of worker '%s' from version %s to version %s", worker.Name, *worker.Machine.Image.Version, *machineImage.Version) workers[idx].Machine.Image = machineImage } } @@ -672,60 +902,8 @@ func KubernetesVersionExistsInCloudProfile(cloudProfile *gardencorev1beta1.Cloud return false, gardencorev1beta1.ExpirableVersion{}, nil } -// DetermineLatestKubernetesPatchVersion finds the latest Kubernetes patch version in the compared -// to the given . In case it does not find a newer patch version, it returns false. Otherwise, -// true and the found version will be returned. -func DetermineLatestKubernetesPatchVersion(cloudProfile *gardencorev1beta1.CloudProfile, currentVersion string) (bool, string, error) { - ok, newerVersions, _, err := determineNextKubernetesVersions(cloudProfile, currentVersion, "~") - if err != nil || !ok { - return ok, "", err - } - sort.Strings(newerVersions) - return true, newerVersions[len(newerVersions)-1], nil -} - -// DetermineNextKubernetesMinorVersion finds the next available Kubernetes minor version in the compared -// to the given . In case it does not find a newer minor version, it returns false. Otherwise, -// true and the found version will be returned. -func DetermineNextKubernetesMinorVersion(cloudProfile *gardencorev1beta1.CloudProfile, currentVersion string) (bool, string, error) { - ok, newerVersions, _, err := determineNextKubernetesVersions(cloudProfile, currentVersion, "^") - if err != nil || !ok { - return ok, "", err - } - sort.Strings(newerVersions) - return true, newerVersions[0], nil -} - -// determineKubernetesVersions finds newer Kubernetes versions in the compared -// with the to the given . The has to be a github.com/Masterminds/semver -// range comparison symbol. In case it does not find a newer version, it returns false. Otherwise, -// true and the found version will be returned. 
-func determineNextKubernetesVersions(cloudProfile *gardencorev1beta1.CloudProfile, currentVersion, operator string) (bool, []string, []gardencorev1beta1.ExpirableVersion, error) { - var ( - newerVersions = []gardencorev1beta1.ExpirableVersion{} - newerVersionsString = []string{} - ) - - for _, version := range cloudProfile.Spec.Kubernetes.Versions { - ok, err := versionutils.CompareVersions(version.Version, operator, currentVersion) - if err != nil { - return false, []string{}, []gardencorev1beta1.ExpirableVersion{}, err - } - if version.Version != currentVersion && ok { - newerVersions = append(newerVersions, version) - newerVersionsString = append(newerVersionsString, version.Version) - } - } - - if len(newerVersions) == 0 { - return false, []string{}, []gardencorev1beta1.ExpirableVersion{}, nil - } - - return true, newerVersionsString, newerVersions, nil -} - // SetMachineImageVersionsToMachineImage sets imageVersions to the matching imageName in the machineImages. -func SetMachineImageVersionsToMachineImage(machineImages []gardencorev1beta1.MachineImage, imageName string, imageVersions []gardencorev1beta1.ExpirableVersion) ([]gardencorev1beta1.MachineImage, error) { +func SetMachineImageVersionsToMachineImage(machineImages []gardencorev1beta1.MachineImage, imageName string, imageVersions []gardencorev1beta1.MachineImageVersion) ([]gardencorev1beta1.MachineImage, error) { for index, image := range machineImages { if strings.EqualFold(image.Name, imageName) { machineImages[index].Versions = imageVersions @@ -776,9 +954,245 @@ func FindPrimaryDNSProvider(providers []gardencorev1beta1.DNSProvider) *gardenco return &primaryProvider } } - // TODO: timuthy - Only required for migration and can be removed in a future version. - if len(providers) > 0 { - return &providers[0] + return nil +} + +type VersionPredicate func(expirableVersion gardencorev1beta1.ExpirableVersion, version *semver.Version) (bool, error) + +// GetKubernetesVersionForPatchUpdate finds the latest Kubernetes patch version for its minor version in the compared +// to the given . Preview and expired versions do not qualify for the kubernetes patch update. In case it does not find a newer patch version, it returns false. Otherwise, +// true and the found version will be returned. +func GetKubernetesVersionForPatchUpdate(cloudProfile *gardencorev1beta1.CloudProfile, currentVersion string) (bool, string, error) { + currentSemVerVersion, err := semver.NewVersion(currentVersion) + if err != nil { + return false, "", err + } + + qualifyingVersionFound, latestVersion, err := GetLatestQualifyingVersion(cloudProfile.Spec.Kubernetes.Versions, FilterDifferentMajorMinorVersion(*currentSemVerVersion), FilterSameVersion(*currentSemVerVersion), FilterExpiredVersion()) + if err != nil { + return false, "", err + } + // latest version cannot be found. Do not return an error, but allow for minor upgrade if Shoot's machine image version is expired. + if !qualifyingVersionFound { + return false, "", nil + } + + return true, latestVersion.Version, nil +} + +// GetKubernetesVersionForMinorUpdate finds a Kubernetes version in the that qualifies for a Kubernetes minor level update given a . +// A qualifying version is a non-preview version having the minor version increased by exactly one version. 
+// In case the consecutive minor version has only expired versions, picks the latest expired version (will do another minor update during the next maintenance time). +// If a version can be found, returns true and the qualifying patch version of the next minor version. +// In case it does not find a version, it returns false. +func GetKubernetesVersionForMinorUpdate(cloudProfile *gardencorev1beta1.CloudProfile, currentVersion string) (bool, string, error) { + currentSemVerVersion, err := semver.NewVersion(currentVersion) + if err != nil { + return false, "", err + } + + qualifyingVersionFound, latestVersion, err := GetLatestQualifyingVersion(cloudProfile.Spec.Kubernetes.Versions, FilterNonConsecutiveMinorVersion(*currentSemVerVersion), FilterSameVersion(*currentSemVerVersion), FilterExpiredVersion()) + if err != nil { + return false, "", err + } + if !qualifyingVersionFound { + // in case there are only expired versions in the consecutive minor version, pick the latest expired version + qualifyingVersionFound, latestVersion, err = GetLatestQualifyingVersion(cloudProfile.Spec.Kubernetes.Versions, FilterNonConsecutiveMinorVersion(*currentSemVerVersion), FilterSameVersion(*currentSemVerVersion)) + if err != nil { + return false, "", err + } + if !qualifyingVersionFound { + return false, "", nil + } + } + + return true, latestVersion.Version, nil +} + +// GetLatestQualifyingVersion returns the latest expirable version from a set of expirable versions. +// A version qualifies if its classification is not preview and the optional predicates do not filter out the version. +// If a predicate returns true, the version is not considered for the latest qualifying version. +func GetLatestQualifyingVersion(versions []gardencorev1beta1.ExpirableVersion, predicate ...VersionPredicate) (qualifyingVersionFound bool, latest *gardencorev1beta1.ExpirableVersion, err error) { + latestSemanticVersion := &semver.Version{} + var latestVersion *gardencorev1beta1.ExpirableVersion +OUTER: + for _, v := range versions { + if v.Classification != nil && *v.Classification == gardencorev1beta1.ClassificationPreview { + continue + } + + semver, err := semver.NewVersion(v.Version) + if err != nil { + return false, nil, fmt.Errorf("error while parsing version '%s': %s", v.Version, err.Error()) + } + + for _, p := range predicate { + if p == nil { + continue + } + + shouldFilter, err := p(v, semver) + if err != nil { + return false, nil, fmt.Errorf("error while evaluating predicate: '%s'", err.Error()) + } + if shouldFilter { + continue OUTER + } + } + + if semver.GreaterThan(latestSemanticVersion) { + latestSemanticVersion = semver + // avoid DeepCopy + latest := v + latestVersion = &latest + } + } + // unable to find qualified versions + if latestVersion == nil { + return false, nil, nil + } + return true, latestVersion, nil +} + +// FilterDifferentMajorMinorVersion returns a VersionPredicate (closure) that evaluates whether a given version v has a different major.minor version than the currentSemVerVersion. +// returns true if v has a different major.minor version +func FilterDifferentMajorMinorVersion(currentSemVerVersion semver.Version) VersionPredicate { + return func(_ gardencorev1beta1.ExpirableVersion, v *semver.Version) (bool, error) { + isWithinRange, err := versionutils.CompareVersions(v.String(), "~", currentSemVerVersion.String()) + if err != nil { + return true, err + } + return !isWithinRange, nil + } +}
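The helpers above compose: GetLatestQualifyingVersion performs the scan, while the VersionPredicate closures encode the upgrade policy. A minimal sketch of such a composition, mirroring what GetKubernetesVersionForPatchUpdate does (illustrative only, not part of the vendored patch; the function name latestPatchFor and its inputs are assumptions):

func latestPatchFor(cloudProfile *gardencorev1beta1.CloudProfile, currentVersion string) (string, error) {
    current, err := semver.NewVersion(currentVersion)
    if err != nil {
        return "", err
    }
    found, latest, err := GetLatestQualifyingVersion(
        cloudProfile.Spec.Kubernetes.Versions,      // candidate ExpirableVersions offered by the CloudProfile
        FilterDifferentMajorMinorVersion(*current), // keep only versions with the same major.minor as the current one
        FilterSameVersion(*current),                // drop the version that is already in use
        FilterExpiredVersion(),                     // drop versions past their expiration date
    )
    if err != nil || !found {
        return "", err
    }
    return latest.Version, nil
}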
+ +// FilterNonConsecutiveMinorVersion returns a VersionPredicate (closure) that evaluates whether a given version v has a consecutive minor version compared to the currentSemVerVersion. +// returns true if v does not have a consecutive minor version +func FilterNonConsecutiveMinorVersion(currentSemVerVersion semver.Version) VersionPredicate { + return func(_ gardencorev1beta1.ExpirableVersion, v *semver.Version) (bool, error) { + isWithinRange, err := versionutils.CompareVersions(v.String(), "^", currentSemVerVersion.String()) + if err != nil { + return true, err + } + + if !isWithinRange { + return true, nil + } + + hasIncorrectMinor := currentSemVerVersion.Minor()+1 != v.Minor() + return hasIncorrectMinor, nil + } +} + +// FilterSameVersion returns a VersionPredicate (closure) that evaluates whether a given version v is equal to the currentSemVerVersion. +// returns true if it is equal +func FilterSameVersion(currentSemVerVersion semver.Version) VersionPredicate { + return func(_ gardencorev1beta1.ExpirableVersion, v *semver.Version) (bool, error) { + return v.Equal(&currentSemVerVersion), nil + } +} + +// FilterLowerVersion returns a VersionPredicate (closure) that evaluates whether a given version v is lower than the currentSemVerVersion. +// returns true if it is lower +func FilterLowerVersion(currentSemVerVersion semver.Version) VersionPredicate { + return func(_ gardencorev1beta1.ExpirableVersion, v *semver.Version) (bool, error) { + return v.LessThan(&currentSemVerVersion), nil + } +} + +// FilterExpiredVersion returns a closure that evaluates whether a given expirable version is expired. +// returns true if it is expired +func FilterExpiredVersion() func(expirableVersion gardencorev1beta1.ExpirableVersion, version *semver.Version) (bool, error) { + return func(expirableVersion gardencorev1beta1.ExpirableVersion, _ *semver.Version) (bool, error) { + return expirableVersion.ExpirationDate != nil && (time.Now().UTC().After(expirableVersion.ExpirationDate.UTC()) || time.Now().UTC().Equal(expirableVersion.ExpirationDate.UTC())), nil + } +} + +// GetResourceByName returns the first NamedResourceReference with the given name in the given slice, or nil if not found. +func GetResourceByName(resources []gardencorev1beta1.NamedResourceReference, name string) *gardencorev1beta1.NamedResourceReference { + for _, resource := range resources { + if resource.Name == name { + return &resource + } } return nil } + +// UpsertLastError adds a 'last error' to the given list of existing 'last errors' if it does not exist yet. Otherwise, +// it updates it. +func UpsertLastError(lastErrors []gardencorev1beta1.LastError, lastError gardencorev1beta1.LastError) []gardencorev1beta1.LastError { + var ( + out []gardencorev1beta1.LastError + found bool + ) + + for _, lastErr := range lastErrors { + if lastErr.TaskID != nil && lastError.TaskID != nil && *lastErr.TaskID == *lastError.TaskID { + out = append(out, lastError) + found = true + } else { + out = append(out, lastErr) + } + } + + if !found { + out = append(out, lastError) + } + + return out +}
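UpsertLastError and DeleteLastErrorByTaskID (defined next) use the TaskID as the identity of an entry. A short usage sketch with a hypothetical task ID and description, not part of the vendored patch:

taskID := "deploy-kube-apiserver" // hypothetical task identifier
var lastErrors []gardencorev1beta1.LastError

// Insert (or refresh) the entry for this task; entries are matched by TaskID.
lastErrors = UpsertLastError(lastErrors, gardencorev1beta1.LastError{
    TaskID:      &taskID,
    Description: "kube-apiserver deployment timed out",
})

// Once the task eventually succeeds, drop its entry again.
lastErrors = DeleteLastErrorByTaskID(lastErrors, taskID)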
+ +// DeleteLastErrorByTaskID removes the 'last error' with the given task ID from the given 'last error' list. +func DeleteLastErrorByTaskID(lastErrors []gardencorev1beta1.LastError, taskID string) []gardencorev1beta1.LastError { + var out []gardencorev1beta1.LastError + for _, lastErr := range lastErrors { + if lastErr.TaskID == nil || taskID != *lastErr.TaskID { + out = append(out, lastErr) + } + } + return out +} + +// ShootItems provides helper functions for ShootLists +type ShootItems gardencorev1beta1.ShootList + +// Union returns the set of Shoots that are present either in s or in shootItems +func (s *ShootItems) Union(shootItems *ShootItems) []gardencorev1beta1.Shoot { + unionedShoots := make(map[string]gardencorev1beta1.Shoot) + for _, s := range s.Items { + unionedShoots[objectKey(s.Namespace, s.Name)] = s + } + + for _, s := range shootItems.Items { + unionedShoots[objectKey(s.Namespace, s.Name)] = s + } + + shoots := make([]gardencorev1beta1.Shoot, 0, len(unionedShoots)) + for _, v := range unionedShoots { + shoots = append(shoots, v) + } + + return shoots +} + +func objectKey(namespace, name string) string { + return fmt.Sprintf("%s/%s", namespace, name) +} + +// GetPurpose returns the purpose of the shoot or 'evaluation' if it's nil. +func GetPurpose(s *gardencorev1beta1.Shoot) gardencorev1beta1.ShootPurpose { + if v := s.Spec.Purpose; v != nil { + return *v + } + return gardencorev1beta1.ShootPurposeEvaluation +} + +// KubernetesDashboardEnabled returns true if the kubernetes-dashboard addon is enabled in the Shoot manifest. +func KubernetesDashboardEnabled(addons *gardencorev1beta1.Addons) bool { + return addons != nil && addons.KubernetesDashboard != nil && addons.KubernetesDashboard.Enabled +} + +// NginxIngressEnabled returns true if the nginx-ingress addon is enabled in the Shoot manifest. +func NginxIngressEnabled(addons *gardencorev1beta1.Addons) bool { + return addons != nil && addons.NginxIngress != nil && addons.NginxIngress.Enabled +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types.go new file mode 100644 index 000000000..4548e905f --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types.go @@ -0,0 +1,21 @@ +// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+ +package v1beta1 + +const ( + // GardenerSeedLeaseNamespace is the namespace in which Gardenlet will report Seeds' + // status using Lease resources for each Seed + GardenerSeedLeaseNamespace = "gardener-system-seed-lease" +) diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_backupbucket.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_backupbucket.go index 894a67842..893360494 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_backupbucket.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_backupbucket.go @@ -17,6 +17,7 @@ package v1beta1 import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" ) // +genclient @@ -52,7 +53,7 @@ type BackupBucketSpec struct { Provider BackupBucketProvider `json:"provider" protobuf:"bytes,1,opt,name=provider"` // ProviderConfig is the configuration passed to BackupBucket resource. // +optional - ProviderConfig *ProviderConfig `json:"providerConfig,omitempty" protobuf:"bytes,2,opt,name=providerConfig"` + ProviderConfig *runtime.RawExtension `json:"providerConfig,omitempty" protobuf:"bytes,2,opt,name=providerConfig"` // SecretRef is a reference to a secret that contains the credentials to access object store. SecretRef corev1.SecretReference `json:"secretRef" protobuf:"bytes,3,opt,name=secretRef"` // SeedName holds the name of the seed allocated to BackupBucket for running controller. @@ -64,7 +65,7 @@ type BackupBucketSpec struct { type BackupBucketStatus struct { // ProviderStatus is the configuration passed to BackupBucket resource. // +optional - ProviderStatus *ProviderConfig `json:"providerStatus,omitempty" protobuf:"bytes,1,opt,name=providerStatus"` + ProviderStatus *runtime.RawExtension `json:"providerStatus,omitempty" protobuf:"bytes,1,opt,name=providerStatus"` // LastOperation holds information about the last operation on the BackupBucket. // +optional LastOperation *LastOperation `json:"lastOperation,omitempty" protobuf:"bytes,2,opt,name=lastOperation"` diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_cloudprofile.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_cloudprofile.go index f68b1bdfe..fa0a42255 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_cloudprofile.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_cloudprofile.go @@ -17,6 +17,7 @@ package v1beta1 import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" ) // +genclient @@ -64,7 +65,7 @@ type CloudProfileSpec struct { MachineTypes []MachineType `json:"machineTypes" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,4,rep,name=machineTypes"` // ProviderConfig contains provider-specific configuration for the profile. // +optional - ProviderConfig *ProviderConfig `json:"providerConfig,omitempty" protobuf:"bytes,5,opt,name=providerConfig"` + ProviderConfig *runtime.RawExtension `json:"providerConfig,omitempty" protobuf:"bytes,5,opt,name=providerConfig"` // Regions contains constraints regarding allowed values for regions and zones. // +patchMergeKey=name // +patchStrategy=merge @@ -72,8 +73,10 @@ type CloudProfileSpec struct { // SeedSelector contains an optional list of labels on `Seed` resources that marks those seeds whose shoots may use this provider profile. // An empty list means that all seeds of the same provider type are supported. 
// This is useful for environments that are of the same type (like openstack) but may have different "instances"/landscapes. + // Optionally a list of possible providers can be added to enable cross-provider scheduling. By default, the provider + // type of the seed must match the shoot's provider. // +optional - SeedSelector *metav1.LabelSelector `json:"seedSelector,omitempty" protobuf:"bytes,7,opt,name=seedSelector"` + SeedSelector *SeedSelector `json:"seedSelector,omitempty" protobuf:"bytes,7,opt,name=seedSelector"` // Type is the name of the provider. Type string `json:"type" protobuf:"bytes,8,opt,name=type"` // VolumeTypes contains constraints regarding allowed values for volume types in the 'workers' block in the Shoot specification. @@ -83,6 +86,16 @@ type CloudProfileSpec struct { VolumeTypes []VolumeType `json:"volumeTypes,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,9,rep,name=volumeTypes"` } +// SeedSelector contains constraints for selecting seed to be usable for shoots using a profile +type SeedSelector struct { + // LabelSelector is optional and can be used to select seeds by their label settings + // +optional + *metav1.LabelSelector `json:",inline,omitempty" protobuf:"bytes,1,opt,name=labelSelector"` + // Providers is optional and can be used by restricting seeds by their provider type. '*' can be used to enable seeds regardless of their provider type. + // +optional + ProviderTypes []string `json:"providerTypes,omitempty" protobuf:"bytes,2,rep,name=providerTypes"` +} + // KubernetesSettings contains constraints regarding allowed values of the 'kubernetes' block in the Shoot specification. type KubernetesSettings struct { // Versions is the list of allowed Kubernetes versions with optional expiration dates for Shoot clusters. @@ -96,10 +109,18 @@ type KubernetesSettings struct { type MachineImage struct { // Name is the name of the image. Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - // Versions contains versions and expiration dates of the machine image + // Versions contains versions, expiration dates and container runtimes of the machine image // +patchMergeKey=version // +patchStrategy=merge - Versions []ExpirableVersion `json:"versions" patchStrategy:"merge" patchMergeKey:"version" protobuf:"bytes,2,rep,name=versions"` + Versions []MachineImageVersion `json:"versions" patchStrategy:"merge" patchMergeKey:"version" protobuf:"bytes,2,rep,name=versions"` +} + +// MachineImageVersion is an expirable version with list of supported container runtimes and interfaces +type MachineImageVersion struct { + ExpirableVersion `json:",inline" protobuf:"bytes,1,opt,name=expirableVersion"` + // CRI list of supported container runtime and interfaces supported by this version + // +optional + CRI []CRI `json:"cri,omitempty" protobuf:"bytes,2,rep,name=cri"` } // ExpirableVersion contains a version and an expiration date. @@ -151,6 +172,11 @@ type Region struct { // +patchStrategy=merge // +optional Zones []AvailabilityZone `json:"zones,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=zones"` + // Labels is an optional set of key-value pairs that contain certain administrator-controlled labels for this region. + // It can be used by Gardener administrators/operators to provide additional information about a region, e.g. wrt + // quality, reliability, access restrictions, etc. 
+ // +optional + Labels map[string]string `json:"labels,omitempty" protobuf:"bytes,3,rep,name=labels"` } // AvailabilityZone is an availability zone. diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_common.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_common.go index 1a4a214d5..912289479 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_common.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_common.go @@ -20,16 +20,20 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" type ErrorCode string const ( - // ErrorInfraUnauthorized indicates that the last error occurred due to invalid cloud provider credentials. + // ErrorInfraUnauthorized indicates that the last error occurred due to invalid infrastructure credentials. ErrorInfraUnauthorized ErrorCode = "ERR_INFRA_UNAUTHORIZED" - // ErrorInfraInsufficientPrivileges indicates that the last error occurred due to insufficient cloud provider privileges. + // ErrorInfraInsufficientPrivileges indicates that the last error occurred due to insufficient infrastructure privileges. ErrorInfraInsufficientPrivileges ErrorCode = "ERR_INFRA_INSUFFICIENT_PRIVILEGES" - // ErrorInfraQuotaExceeded indicates that the last error occurred due to cloud provider quota limits. + // ErrorInfraQuotaExceeded indicates that the last error occurred due to infrastructure quota limits. ErrorInfraQuotaExceeded ErrorCode = "ERR_INFRA_QUOTA_EXCEEDED" - // ErrorInfraDependencies indicates that the last error occurred due to dependent objects on the cloud provider level. + // ErrorInfraDependencies indicates that the last error occurred due to dependent objects on the infrastructure level. ErrorInfraDependencies ErrorCode = "ERR_INFRA_DEPENDENCIES" - // ErrorCleanupClusterResources indicates that the last error occurred due to resources in the cluster are stuck in deletion. + // ErrorInfraResourcesDepleted indicates that the last error occurred due to depleted resource in the infrastructure. + ErrorInfraResourcesDepleted ErrorCode = "ERR_INFRA_RESOURCES_DEPLETED" + // ErrorCleanupClusterResources indicates that the last error occurred due to resources in the cluster that are stuck in deletion. ErrorCleanupClusterResources ErrorCode = "ERR_CLEANUP_CLUSTER_RESOURCES" + // ErrorConfigurationProblem indicates that the last error occurred due to a configuration problem. + ErrorConfigurationProblem ErrorCode = "ERR_CONFIGURATION_PROBLEM" ) // LastError indicates the last occurred error for an operation on a resource. @@ -47,26 +51,6 @@ type LastError struct { LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty" protobuf:"bytes,4,opt,name=lastUpdateTime"` } -// GetDescription implements LastError. -func (l *LastError) GetDescription() string { - return l.Description -} - -// GetTaskID implements LastError -func (l *LastError) GetTaskID() *string { - return l.TaskID -} - -// GetCodes implements LastError. -func (l *LastError) GetCodes() []ErrorCode { - return l.Codes -} - -// GetLastUpdateTime implements LastError. -func (l *LastError) GetLastUpdateTime() *metav1.Time { - return l.LastUpdateTime -} - // LastOperationType is a string alias. type LastOperationType string @@ -79,6 +63,8 @@ const ( LastOperationTypeDelete LastOperationType = "Delete" // LastOperationTypeMigrate indicates a 'migrate' operation. LastOperationTypeMigrate LastOperationType = "Migrate" + // LastOperationTypeRestore indicates a 'restore' operation. 
+ LastOperationTypeRestore LastOperationType = "Restore" ) // LastOperationState is a string alias. @@ -114,31 +100,6 @@ type LastOperation struct { Type LastOperationType `json:"type" protobuf:"bytes,5,opt,name=type,casttype=LastOperationType"` } -// GetDescription implements LastOperation. -func (l *LastOperation) GetDescription() string { - return l.Description -} - -// GetLastUpdateTime implements LastOperation. -func (l *LastOperation) GetLastUpdateTime() metav1.Time { - return l.LastUpdateTime -} - -// GetProgress implements LastOperation. -func (l *LastOperation) GetProgress() int32 { - return l.Progress -} - -// GetState implements LastOperation. -func (l *LastOperation) GetState() LastOperationState { - return l.State -} - -// GetType implements LastOperation. -func (l *LastOperation) GetType() LastOperationType { - return l.Type -} - // Gardener holds the information about the Gardener version that operated a resource. type Gardener struct { // ID is the Docker container id of the Gardener which last acted on a resource. @@ -156,11 +117,6 @@ const ( // ExternalGardenerName is the value in a Kubernetes core resources `.metadata.finalizers[]` array on which the // Gardener will react when performing a delete request on a resource. ExternalGardenerName = "gardener.cloud/gardener" - // ExternalGardenerNameDeprecated is the value in a Kubernetes core resources `.metadata.finalizers[]` array on which the - // Gardener will react when performing a delete request on a resource. - // - // Deprecated: Use `ExternalGardenerName` instead. - ExternalGardenerNameDeprecated = "garden.sapcloud.io/gardener" ) const ( @@ -176,6 +132,10 @@ const ( EventDeleted = "Deleted" // EventDeleteError indicates that the a Delete operation failed. EventDeleteError = "DeleteError" - // EventOperationPending - EventOperationPending = "OperationPending" + // EventPrepareMigration indicates that a Prepare Migration operation started. + EventPrepareMigration = "PrepareMigration" + // EventMigrationPrepared indicates that Migration preparation was successful. + EventMigrationPrepared = "MigrationPrepared" + // EventMigrationPreparationFailed indicates that Migration preparation failed. + EventMigrationPreparationFailed = "MigrationPreparationFailed" ) diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_controllerinstallation.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_controllerinstallation.go index 7a01561fb..f2b2024cc 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_controllerinstallation.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_controllerinstallation.go @@ -17,6 +17,7 @@ package v1beta1 import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" ) // +genclient @@ -63,7 +64,7 @@ type ControllerInstallationStatus struct { Conditions []Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` // ProviderStatus contains type-specific status. // +optional - ProviderStatus *ProviderConfig `json:"providerStatus,omitempty" protobuf:"bytes,2,opt,name=providerStatus"` + ProviderStatus *runtime.RawExtension `json:"providerStatus,omitempty" protobuf:"bytes,2,opt,name=providerStatus"` } const ( @@ -73,4 +74,7 @@ const ( ControllerInstallationInstalled ConditionType = "Installed" // ControllerInstallationValid is a condition type for indicating whether the installation request is valid. 
ControllerInstallationValid ConditionType = "Valid" + // ControllerInstallationRequired is a condition type for indicating that the respective extension controller is + // still required on the seed cluster as corresponding extension resources still exist. + ControllerInstallationRequired ConditionType = "Required" ) diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_controllerregistration.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_controllerregistration.go index fa42fc330..c25970f0b 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_controllerregistration.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_controllerregistration.go @@ -16,6 +16,7 @@ package v1beta1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" ) // +genclient @@ -47,7 +48,8 @@ type ControllerRegistrationList struct { type ControllerRegistrationSpec struct { // Resources is a list of combinations of kinds (DNSProvider, Infrastructure, Generic, ...) and their actual types // (aws-route53, gcp, auditlog, ...). - Resources []ControllerResource `json:"resources" protobuf:"bytes,1,rep,name=resources"` + // +optional + Resources []ControllerResource `json:"resources,omitempty" protobuf:"bytes,1,opt,name=resources"` // Deployment contains information for how this controller is deployed. // +optional Deployment *ControllerDeployment `json:"deployment,omitempty" protobuf:"bytes,2,opt,name=deployment"` @@ -66,6 +68,11 @@ type ControllerResource struct { // ReconcileTimeout defines how long Gardener should wait for the resource reconciliation. // +optional ReconcileTimeout *metav1.Duration `json:"reconcileTimeout,omitempty" protobuf:"bytes,4,opt,name=reconcileTimeout"` + // Primary determines if the controller backed by this ControllerRegistration is responsible for the extension + // resource's lifecycle. This field defaults to true. There must be exactly one primary controller for this kind/type + // combination. + // +optional + Primary *bool `json:"primary,omitempty" protobuf:"varint,5,opt,name=primary"` } // ControllerDeployment contains information for how this controller is deployed. @@ -74,5 +81,28 @@ type ControllerDeployment struct { Type string `json:"type" protobuf:"bytes,1,opt,name=type"` // ProviderConfig contains type-specific configuration. // +optional - ProviderConfig *ProviderConfig `json:"providerConfig,omitempty" protobuf:"bytes,2,opt,name=providerConfig"` + ProviderConfig *runtime.RawExtension `json:"providerConfig,omitempty" protobuf:"bytes,2,opt,name=providerConfig"` + // Policy controls how the controller is deployed. It defaults to 'OnDemand'. + // +optional + Policy *ControllerDeploymentPolicy `json:"policy,omitempty" protobuf:"bytes,3,opt,name=policy"` + // SeedSelector contains an optional label selector for seeds. Only if the labels match then this controller will be + // considered for a deployment. + // An empty list means that all seeds are selected. + // +optional + SeedSelector *metav1.LabelSelector `json:"seedSelector,omitempty" protobuf:"bytes,4,opt,name=seedSelector"` } + +// ControllerDeploymentPolicy is a string alias. +type ControllerDeploymentPolicy string + +const ( + // ControllerDeploymentPolicyOnDemand specifies that the controller shall be only deployed if required by another + // resource. If nothing requires it then the controller shall not be deployed. 
+ ControllerDeploymentPolicyOnDemand ControllerDeploymentPolicy = "OnDemand" + // ControllerDeploymentPolicyAlways specifies that the controller shall be deployed always, independent of whether + // another resource requires it or the respective seed has shoots. + ControllerDeploymentPolicyAlways ControllerDeploymentPolicy = "Always" + // ControllerDeploymentPolicyAlwaysExceptNoShoots specifies that the controller shall be deployed always, independent of + // whether another resource requires it, but only when the respective seed has at least one shoot. + ControllerDeploymentPolicyAlwaysExceptNoShoots ControllerDeploymentPolicy = "AlwaysExceptNoShoots" +) diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_project.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_project.go index 10bdb6dc9..3938c9920 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_project.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_project.go @@ -77,6 +77,9 @@ type ProjectSpec struct { // A nil value means that Gardener will determine the name of the namespace. // +optional Namespace *string `json:"namespace,omitempty" protobuf:"bytes,6,opt,name=namespace"` + // Tolerations contains the tolerations for taints on seed clusters. + // +optional + Tolerations *ProjectTolerations `json:"tolerations,omitempty" protobuf:"bytes,7,opt,name=tolerations"` } // ProjectStatus holds the most recently observed status of the project. @@ -86,6 +89,13 @@ type ProjectStatus struct { ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` // Phase is the current phase of the project. Phase ProjectPhase `json:"phase,omitempty" protobuf:"bytes,2,opt,name=phase,casttype=ProjectPhase"` + // StaleSinceTimestamp contains the timestamp when the project was first discovered to be stale/unused. + // +optional + StaleSinceTimestamp *metav1.Time `json:"staleSinceTimestamp,omitempty" protobuf:"bytes,3,opt,name=staleSinceTimestamp"` + // StaleAutoDeleteTimestamp contains the timestamp when the project will be garbage-collected/automatically deleted + // because it's stale/unused. + // +optional + StaleAutoDeleteTimestamp *metav1.Time `json:"staleAutoDeleteTimestamp,omitempty" protobuf:"bytes,4,opt,name=staleAutoDeleteTimestamp"` } // ProjectMember is a member of a project. @@ -103,14 +113,39 @@ type ProjectMember struct { Roles []string `json:"roles,omitempty" protobuf:"bytes,3,rep,name=roles"` } +// ProjectTolerations contains the tolerations for taints on seed clusters. +type ProjectTolerations struct { + // Defaults contains a list of tolerations that are added to the shoots in this project by default. + // +patchMergeKey=key + // +patchStrategy=merge + // +optional + Defaults []Toleration `json:"defaults,omitempty" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,1,rep,name=defaults"` + // Whitelist contains a list of tolerations that are allowed to be added to the shoots in this project. Please note + // that this list may only be added by users having the `spec-tolerations-whitelist` verb for project resources. + // +patchMergeKey=key + // +patchStrategy=merge + // +optional + Whitelist []Toleration `json:"whitelist,omitempty" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,2,rep,name=whitelist"` +} + +// Toleration is a toleration for a seed taint. +type Toleration struct { + // Key is the toleration key to be applied to a project or shoot. 
+ Key string `json:"key" protobuf:"bytes,1,opt,name=key"` + // Value is the toleration value corresponding to the toleration key. + // +optional + Value *string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"` +} + const ( // ProjectMemberAdmin is a const for a role that provides full admin access. ProjectMemberAdmin = "admin" // ProjectMemberOwner is a const for a role that provides full owner access. ProjectMemberOwner = "owner" + // ProjectMemberUserAccessManager is a const for a role that provides permissions to manage human user(s, (groups)). + ProjectMemberUserAccessManager = "uam" // ProjectMemberViewer is a const for a role that provides limited permissions to only view some resources. ProjectMemberViewer = "viewer" - // ProjectMemberExtensionPrefix is a prefix for custom roles that are not known by Gardener. ProjectMemberExtensionPrefix = "extension:" ) @@ -132,6 +167,8 @@ const ( ProjectEventNamespaceReconcileFailed = "NamespaceReconcileFailed" // ProjectEventNamespaceReconcileSuccessful indicates that the namespace reconciliation has succeeded. ProjectEventNamespaceReconcileSuccessful = "NamespaceReconcileSuccessful" + // ProjectEventNamespaceNotEmpty indicates that the namespace cannot be released because it is not empty. + ProjectEventNamespaceNotEmpty = "NamespaceNotEmpty" // ProjectEventNamespaceDeletionFailed indicates that the namespace deletion failed. ProjectEventNamespaceDeletionFailed = "NamespaceDeletionFailed" // ProjectEventNamespaceMarkedForDeletion indicates that the namespace has been successfully marked for deletion. diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_seed.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_seed.go index 9eb6d82e9..8ba5dd072 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_seed.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_seed.go @@ -18,6 +18,7 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" ) // +genclient @@ -47,6 +48,16 @@ type SeedList struct { Items []Seed `json:"items" protobuf:"bytes,2,rep,name=items"` } +// SeedTemplate is a template for creating a Seed object. +type SeedTemplate struct { + // Standard object metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Specification of the desired behavior of the Seed. + // +optional + Spec SeedSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + // SeedSpec is the specification of a Seed. type SeedSpec struct { // Backup holds the object store configuration for the backups of shoot (currently only etcd). @@ -71,6 +82,12 @@ type SeedSpec struct { // Volume contains settings for persistentvolumes created in the seed cluster. // +optional Volume *SeedVolume `json:"volume,omitempty" protobuf:"bytes,7,opt,name=volume"` + // Settings contains certain settings for this seed cluster. + // +optional + Settings *SeedSettings `json:"settings,omitempty" protobuf:"bytes,8,opt,name=settings"` + // Ingress configures Ingress specific settings of the Seed cluster. + // +optional + Ingress *Ingress `json:"ingress,omitempty" protobuf:"bytes,9,opt,name=ingress"` } // SeedStatus is the status of a Seed. @@ -90,6 +107,16 @@ type SeedStatus struct { // Seed's generation, which is updated on mutation by the API Server. 
// +optional ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,4,opt,name=observedGeneration"` + // ClusterIdentity is the identity of the Seed cluster + // +optional + ClusterIdentity *string `json:"clusterIdentity,omitempty" protobuf:"bytes,5,opt,name=clusterIdentity"` + // Capacity represents the total resources of a seed. + // +optional + Capacity corev1.ResourceList `json:"capacity,omitempty" protobuf:"bytes,6,rep,name=capacity"` + // Allocatable represents the resources of a seed that are available for scheduling. + // Defaults to Capacity. + // +optional + Allocatable corev1.ResourceList `json:"allocatable,omitempty" protobuf:"bytes,7,rep,name=allocatable"` } // SeedBackup contains the object store configuration for backups for shoot (currently only etcd). @@ -98,7 +125,7 @@ type SeedBackup struct { Provider string `json:"provider" protobuf:"bytes,1,opt,name=provider"` // ProviderConfig is the configuration passed to BackupBucket resource. // +optional - ProviderConfig *ProviderConfig `json:"providerConfig,omitempty" protobuf:"bytes,2,opt,name=providerConfig"` + ProviderConfig *runtime.RawExtension `json:"providerConfig,omitempty" protobuf:"bytes,2,opt,name=providerConfig"` // Region is a region name. // +optional Region *string `json:"region,omitempty" protobuf:"bytes,3,opt,name=region"` @@ -111,8 +138,45 @@ type SeedBackup struct { // SeedDNS contains DNS-relevant information about this seed cluster. type SeedDNS struct { // IngressDomain is the domain of the Seed cluster pointing to the ingress controller endpoint. It will be used - // to construct ingress URLs for system applications running in Shoot clusters. - IngressDomain string `json:"ingressDomain" protobuf:"bytes,1,opt,name=ingressDomain"` + // to construct ingress URLs for system applications running in Shoot clusters. Once set this field is immutable. + // This will be removed in the next API version and replaced by spec.ingress.domain. + // +optional + IngressDomain *string `json:"ingressDomain,omitempty" protobuf:"bytes,1,opt,name=ingressDomain"` + // Provider configures a DNSProvider + // +optional + Provider *SeedDNSProvider `json:"provider,omitempty" protobuf:"bytes,2,opt,name=provider"` +} + +// SeedDNSProvider configures a DNSProvider for Seeds +type SeedDNSProvider struct { + // Type describes the type of the dns-provider, for example `aws-route53` + Type string `json:"type" protobuf:"bytes,1,opt,name=type"` + // SecretRef is a reference to a Secret object containing cloud provider credentials used for registering external domains. + SecretRef corev1.SecretReference `json:"secretRef" protobuf:"bytes,2,opt,name=secretRef"` + // Domains contains information about which domains shall be included/excluded for this provider. + // +optional + Domains *DNSIncludeExclude `json:"domains,omitempty" protobuf:"bytes,3,opt,name=domains"` + // Zones contains information about which hosted zones shall be included/excluded for this provider. + // +optional + Zones *DNSIncludeExclude `json:"zones,omitempty" protobuf:"bytes,4,opt,name=zones"` +} + +// Ingress configures the Ingress specific settings of the Seed cluster +type Ingress struct { + // Domain specifies the IngressDomain of the Seed cluster pointing to the ingress controller endpoint. It will be used + // to construct ingress URLs for system applications running in Shoot clusters. Once set this field is immutable. 
+ Domain string `json:"domain" protobuf:"bytes,1,opt,name=domain"` + // Controller configures a Gardener managed Ingress Controller listening on the ingressDomain + Controller IngressController `json:"controller" protobuf:"bytes,2,opt,name=controller"` +} + +// IngressController enables a Gardener managed Ingress Controller listening on the ingressDomain +type IngressController struct { + // Kind defines which kind of IngressController to use, for example `nginx` + Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` + // ProviderConfig specifies infrastructure specific configuration for the ingressController + // +optional + ProviderConfig *runtime.RawExtension `json:"providerConfig,omitempty" protobuf:"bytes,2,opt,name=providerConfig"` } // SeedNetworks contains CIDRs for the pod, service and node networks of a Kubernetes cluster. @@ -149,11 +213,71 @@ type SeedProvider struct { Type string `json:"type" protobuf:"bytes,1,opt,name=type"` // ProviderConfig is the configuration passed to Seed resource. // +optional - ProviderConfig *ProviderConfig `json:"providerConfig,omitempty" protobuf:"bytes,2,opt,name=providerConfig"` + ProviderConfig *runtime.RawExtension `json:"providerConfig,omitempty" protobuf:"bytes,2,opt,name=providerConfig"` // Region is a name of a region. Region string `json:"region" protobuf:"bytes,3,opt,name=region"` } +// SeedSettings contains certain settings for this seed cluster. +type SeedSettings struct { + // ExcessCapacityReservation controls the excess capacity reservation for shoot control planes in the seed. + // +optional + ExcessCapacityReservation *SeedSettingExcessCapacityReservation `json:"excessCapacityReservation,omitempty" protobuf:"bytes,1,opt,name=excessCapacityReservation"` + // Scheduling controls settings for scheduling decisions for the seed. + // +optional + Scheduling *SeedSettingScheduling `json:"scheduling,omitempty" protobuf:"bytes,2,opt,name=scheduling"` + // ShootDNS controls the shoot DNS settings for the seed. + // +optional + ShootDNS *SeedSettingShootDNS `json:"shootDNS,omitempty" protobuf:"bytes,3,opt,name=shootDNS"` + // LoadBalancerServices controls certain settings for services of type load balancer that are created in the + // seed. + // +optional + LoadBalancerServices *SeedSettingLoadBalancerServices `json:"loadBalancerServices,omitempty" protobuf:"bytes,4,opt,name=loadBalancerServices"` + // VerticalPodAutoscaler controls certain settings for the vertical pod autoscaler components deployed in the seed. + // +optional + VerticalPodAutoscaler *SeedSettingVerticalPodAutoscaler `json:"verticalPodAutoscaler,omitempty" protobuf:"bytes,5,opt,name=verticalPodAutoscaler"` +} + +// SeedSettingExcessCapacityReservation controls the excess capacity reservation for shoot control planes in the +// seed. When enabled then this is done via PodPriority and requires the Seed cluster to have Kubernetes version 1.11 +// or the PodPriority feature gate as well as the scheduling.k8s.io/v1alpha1 API group enabled. +type SeedSettingExcessCapacityReservation struct { + // Enabled controls whether the excess capacity reservation should be enabled. + Enabled bool `json:"enabled" protobuf:"bytes,1,opt,name=enabled"` +} + +// SeedSettingShootDNS controls the shoot DNS settings for the seed. +type SeedSettingShootDNS struct { + // Enabled controls whether the DNS for shoot clusters should be enabled. When disabled then all shoots using the + // seed won't get any DNS providers, DNS records, and no DNS extension controller is required to be installed here. 
+ // This is useful for environments where DNS is not required. + Enabled bool `json:"enabled" protobuf:"bytes,1,opt,name=enabled"` +} + +// SeedSettingScheduling controls settings for scheduling decisions for the seed. +type SeedSettingScheduling struct { + // Visible controls whether the gardener-scheduler shall consider this seed when scheduling shoots. Invisible seeds + // are not considered by the scheduler. + Visible bool `json:"visible" protobuf:"bytes,1,opt,name=visible"` +} + +// SeedSettingLoadBalancerServices controls certain settings for services of type load balancer that are created in the +// seed. +type SeedSettingLoadBalancerServices struct { + // Annotations is a map of annotations that will be injected/merged into every load balancer service object. + // +optional + Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,1,rep,name=annotations"` +} + +// SeedSettingVerticalPodAutoscaler controls certain settings for the vertical pod autoscaler components deployed in the +// seed. +type SeedSettingVerticalPodAutoscaler struct { + // Enabled controls whether the VPA components shall be deployed into the garden namespace in the seed cluster. It + // is enabled by default because Gardener heavily relies on a VPA being deployed. You should only disable this if + // your seed cluster already has another, manually/custom managed VPA deployment. + Enabled bool `json:"enabled" protobuf:"bytes,1,opt,name=enabled"` +} + // SeedTaint describes a taint on a seed. type SeedTaint struct { // Key is the taint key to be applied to a seed. @@ -164,20 +288,9 @@ type SeedTaint struct { } const ( - // SeedTaintDisableDNS is a constant for a taint key on a seed that marks it for disabling DNS. All shoots - // using this seed won't get any DNS providers, DNS records, and no DNS extension controller is required to - // be installed here. This is useful for environment where DNS is not required. - SeedTaintDisableDNS = "seed.gardener.cloud/disable-dns" // SeedTaintProtected is a constant for a taint key on a seed that marks it as protected. Protected seeds // may only be used by shoots in the `garden` namespace. SeedTaintProtected = "seed.gardener.cloud/protected" - // SeedTaintInvisible is a constant for a taint key on a seed that marks it as invisible. Invisible seeds - // are not considered by the gardener-scheduler. - SeedTaintInvisible = "seed.gardener.cloud/invisible" - // SeedTaintDisableCapacityReservation is a constant for a taint key on a seed that marks it for disabling - // excess capacity reservation. This can be useful for seed clusters which only host shooted seeds to reduce - // costs. - SeedTaintDisableCapacityReservation = "seed.gardener.cloud/disable-capacity-reservation" ) // SeedVolume contains settings for persistentvolumes created in the seed cluster. @@ -209,3 +322,9 @@ const ( // SeedGardenletReady is a constant for a condition type indicating that the Gardenlet is ready. SeedGardenletReady ConditionType = "GardenletReady" ) + +// Resource constants for Gardener object types +const ( + // ResourceShoots is a resource constant for the number of shoots. 
+ ResourceShoots corev1.ResourceName = "shoots" +) diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_shoot.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_shoot.go index e3ef740e7..7dc41bbb2 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_shoot.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_shoot.go @@ -17,9 +17,11 @@ package v1beta1 import ( "time" + autoscalingv1 "k8s.io/api/autoscaling/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" ) @@ -92,6 +94,17 @@ type ShootSpec struct { // SeedName is the name of the seed cluster that runs the control plane of the Shoot. // +optional SeedName *string `json:"seedName,omitempty" protobuf:"bytes,14,opt,name=seedName"` + // SeedSelector is an optional selector which must match a seed's labels for the shoot to be scheduled on that seed. + // +optional + SeedSelector *SeedSelector `json:"seedSelector,omitempty" protobuf:"bytes,15,opt,name=seedSelector"` + // Resources holds a list of named resource references that can be referred to in extension configs by their names. + // +optional + Resources []NamedResourceReference `json:"resources,omitempty" protobuf:"bytes,16,rep,name=resources"` + // Tolerations contains the tolerations for taints on seed clusters. + // +patchMergeKey=key + // +patchStrategy=merge + // +optional + Tolerations []Toleration `json:"tolerations,omitempty" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,17,rep,name=tolerations"` } // ShootStatus holds the most recently observed status of the Shoot cluster. @@ -134,6 +147,9 @@ type ShootStatus struct { // UID is a unique identifier for the Shoot cluster to avoid portability between Kubernetes clusters. // It is used to compute unique hashes. UID types.UID `json:"uid" protobuf:"bytes,11,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"` + // ClusterIdentity is the identity of the Shoot cluster + // +optional + ClusterIdentity *string `json:"clusterIdentity,omitempty" protobuf:"bytes,12,opt,name=clusterIdentity"` } ////////////////////////////////////////////////////////////////////////////////////////////////// @@ -174,7 +190,7 @@ const ( // NginxIngress describes configuration values for the nginx-ingress addon. type NginxIngress struct { Addon `json:",inline" protobuf:"bytes,1,opt,name=addon"` - // LoadBalancerSourceRanges is list of whitelist IP sources for NginxIngress + // LoadBalancerSourceRanges is list of allowed IP sources for NginxIngress // +optional LoadBalancerSourceRanges []string `json:"loadBalancerSourceRanges,omitempty" protobuf:"bytes,2,rep,name=loadBalancerSourceRanges"` // Config contains custom configuration for the nginx-ingress-controller configuration. @@ -194,7 +210,7 @@ type NginxIngress struct { // DNS holds information about the provider, the hosted zone id and the domain. type DNS struct { // Domain is the external available domain of the Shoot cluster. This domain will be written into the - // kubeconfig that is handed out to end-users. + // kubeconfig that is handed out to end-users. Once set it is immutable. // +optional Domain *string `json:"domain,omitempty" protobuf:"bytes,1,opt,name=domain"` // Providers is a list of DNS providers that shall be enabled for this shoot cluster. 
Only relevant if @@ -249,7 +265,22 @@ type Extension struct { Type string `json:"type" protobuf:"bytes,1,opt,name=type"` // ProviderConfig is the configuration passed to extension resource. // +optional - ProviderConfig *ProviderConfig `json:"providerConfig,omitempty" protobuf:"bytes,2,opt,name=providerConfig"` + ProviderConfig *runtime.RawExtension `json:"providerConfig,omitempty" protobuf:"bytes,2,opt,name=providerConfig"` + // Disabled allows to disable extensions that were marked as 'globally enabled' by Gardener administrators. + // +optional + Disabled *bool `json:"disabled,omitempty" protobuf:"varint,3,opt,name=disabled"` +} + +////////////////////////////////////////////////////////////////////////////////////////////////// +// NamedResourceReference relevant types // +////////////////////////////////////////////////////////////////////////////////////////////////// + +// NamedResourceReference is a named reference to a resource. +type NamedResourceReference struct { + // Name of the resource reference. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // ResourceRef is a reference to a resource. + ResourceRef autoscalingv1.CrossVersionObjectReference `json:"resourceRef" protobuf:"bytes,2,opt,name=resourceRef"` } ////////////////////////////////////////////////////////////////////////////////////////////////// @@ -291,7 +322,7 @@ type Kubernetes struct { // AllowPrivilegedContainers indicates whether privileged containers are allowed in the Shoot (default: true). // +optional AllowPrivilegedContainers *bool `json:"allowPrivilegedContainers,omitempty" protobuf:"varint,1,opt,name=allowPrivilegedContainers"` - // ClusterAutoscaler contains the configration flags for the Kubernetes cluster autoscaler. + // ClusterAutoscaler contains the configuration flags for the Kubernetes cluster autoscaler. // +optional ClusterAutoscaler *ClusterAutoscaler `json:"clusterAutoscaler,omitempty" protobuf:"bytes,2,opt,name=clusterAutoscaler"` // KubeAPIServer contains configuration settings for the kube-apiserver. @@ -311,9 +342,12 @@ type Kubernetes struct { Kubelet *KubeletConfig `json:"kubelet,omitempty" protobuf:"bytes,7,opt,name=kubelet"` // Version is the semantic Kubernetes version to use for the Shoot cluster. Version string `json:"version" protobuf:"bytes,8,opt,name=version"` + // VerticalPodAutoscaler contains the configuration flags for the Kubernetes vertical pod autoscaler. + // +optional + VerticalPodAutoscaler *VerticalPodAutoscaler `json:"verticalPodAutoscaler,omitempty" protobuf:"bytes,9,opt,name=verticalPodAutoscaler"` } -// ClusterAutoscaler contains the configration flags for the Kubernetes cluster autoscaler. +// ClusterAutoscaler contains the configuration flags for the Kubernetes cluster autoscaler. type ClusterAutoscaler struct { // ScaleDownDelayAfterAdd defines how long after scale up that scale down evaluation resumes (default: 1 hour). // +optional @@ -335,6 +369,57 @@ type ClusterAutoscaler struct { ScanInterval *metav1.Duration `json:"scanInterval,omitempty" protobuf:"bytes,6,opt,name=scanInterval"` } +// VerticalPodAutoscaler contains the configuration flags for the Kubernetes vertical pod autoscaler. +type VerticalPodAutoscaler struct { + // Enabled specifies whether the Kubernetes VPA shall be enabled for the shoot cluster. 
+ Enabled bool `json:"enabled" protobuf:"varint,1,opt,name=enabled"` + // EvictAfterOOMThreshold defines the threshold that will lead to pod eviction in case it OOMed in less than the given + // threshold since its start and if it has only one container (default: 10m0s). + // +optional + EvictAfterOOMThreshold *metav1.Duration `json:"evictAfterOOMThreshold,omitempty" protobuf:"bytes,2,opt,name=evictAfterOOMThreshold"` + // EvictionRateBurst defines the burst of pods that can be evicted (default: 1) + // +optional + EvictionRateBurst *int32 `json:"evictionRateBurst,omitempty" protobuf:"varint,3,opt,name=evictionRateBurst"` + // EvictionRateLimit defines the number of pods that can be evicted per second. A rate limit set to 0 or -1 will + // disable the rate limiter (default: -1). + // +optional + EvictionRateLimit *float64 `json:"evictionRateLimit,omitempty" protobuf:"fixed64,4,opt,name=evictionRateLimit"` + // EvictionTolerance defines the fraction of replica count that can be evicted for update in case more than one + // pod can be evicted (default: 0.5). + // +optional + EvictionTolerance *float64 `json:"evictionTolerance,omitempty" protobuf:"fixed64,5,opt,name=evictionTolerance"` + // RecommendationMarginFraction is the fraction of usage added as the safety margin to the recommended request + // (default: 0.15). + // +optional + RecommendationMarginFraction *float64 `json:"recommendationMarginFraction,omitempty" protobuf:"fixed64,6,opt,name=recommendationMarginFraction"` + // UpdaterInterval is the interval how often the updater should run (default: 1m0s). + // +optional + UpdaterInterval *metav1.Duration `json:"updaterInterval,omitempty" protobuf:"bytes,7,opt,name=updaterInterval"` + // RecommenderInterval is the interval how often metrics should be fetched (default: 1m0s). + // +optional + RecommenderInterval *metav1.Duration `json:"recommenderInterval,omitempty" protobuf:"bytes,8,opt,name=recommenderInterval"` +} + +const ( + // DefaultEvictionRateBurst is the default value for the EvictionRateBurst field in the VPA configuration. + DefaultEvictionRateBurst int32 = 1 + // DefaultEvictionRateLimit is the default value for the EvictionRateLimit field in the VPA configuration. + DefaultEvictionRateLimit float64 = -1 + // DefaultEvictionTolerance is the default value for the EvictionTolerance field in the VPA configuration. + DefaultEvictionTolerance = 0.5 + // DefaultRecommendationMarginFraction is the default value for the RecommendationMarginFraction field in the VPA configuration. + DefaultRecommendationMarginFraction = 0.15 +) + +var ( + // DefaultEvictAfterOOMThreshold is the default value for the EvictAfterOOMThreshold field in the VPA configuration. + DefaultEvictAfterOOMThreshold = metav1.Duration{Duration: 10 * time.Minute} + // DefaultUpdaterInterval is the default value for the UpdaterInterval field in the VPA configuration. + DefaultUpdaterInterval = metav1.Duration{Duration: time.Minute} + // DefaultRecommenderInterval is the default value for the RecommenderInterval field in the VPA configuration. + DefaultRecommenderInterval = metav1.Duration{Duration: time.Minute} +) + // KubernetesConfig contains common configuration fields for the control plane components. type KubernetesConfig struct { // FeatureGates contains information about enabled feature gates. @@ -372,6 +457,30 @@ type KubeAPIServerConfig struct { // of the kube-apiserver. 
// +optional ServiceAccountConfig *ServiceAccountConfig `json:"serviceAccountConfig,omitempty" protobuf:"bytes,8,opt,name=serviceAccountConfig"` + // WatchCacheSizes contains configuration of the API server's watch cache sizes. + // Configuring these flags might be useful for large-scale Shoot clusters with a lot of parallel update requests + // and a lot of watching controllers (e.g. large shooted Seed clusters). When the API server's watch cache's + // capacity is too small to cope with the amount of update requests and watchers for a particular resource, it + // might happen that controller watches are permanently stopped with `too old resource version` errors. + // Starting from kubernetes v1.19, the API server's watch cache size is adapted dynamically and setting the watch + // cache size flags will have no effect, except when setting it to 0 (which disables the watch cache). + // +optional + WatchCacheSizes *WatchCacheSizes `json:"watchCacheSizes,omitempty" protobuf:"bytes,9,opt,name=watchCacheSizes"` + // Requests contains configuration for request-specific settings for the kube-apiserver. + // +optional + Requests *KubeAPIServerRequests `json:"requests,omitempty" protobuf:"bytes,10,opt,name=requests"` +} + +// KubeAPIServerRequests contains configuration for request-specific settings for the kube-apiserver. +type KubeAPIServerRequests struct { + // MaxNonMutatingInflight is the maximum number of non-mutating requests in flight at a given time. When the server + // exceeds this, it rejects requests. + // +optional + MaxNonMutatingInflight *int32 `json:"maxNonMutatingInflight,omitempty" protobuf:"bytes,1,name=maxNonMutatingInflight"` + // MaxMutatingInflight is the maximum number of mutating requests in flight at a given time. When the server + // exceeds this, it rejects requests. + // +optional + MaxMutatingInflight *int32 `json:"maxMutatingInflight,omitempty" protobuf:"bytes,2,name=maxMutatingInflight"` } // ServiceAccountConfig is the kube-apiserver configuration for service accounts. @@ -456,7 +565,34 @@ type AdmissionPlugin struct { Name string `json:"name" protobuf:"bytes,1,opt,name=name"` // Config is the configuration of the plugin. // +optional - Config *ProviderConfig `json:"config,omitempty" protobuf:"bytes,2,opt,name=config"` + Config *runtime.RawExtension `json:"config,omitempty" protobuf:"bytes,2,opt,name=config"` +} + +// WatchCacheSizes contains configuration of the API server's watch cache sizes. +type WatchCacheSizes struct { + // Default configures the default watch cache size of the kube-apiserver + // (flag `--default-watch-cache-size`, defaults to 100). + // See: https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/ + // +optional + Default *int32 `json:"default,omitempty" protobuf:"varint,1,opt,name=default"` + // Resources configures the watch cache size of the kube-apiserver per resource + // (flag `--watch-cache-sizes`). + // See: https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/ + // +optional + Resources []ResourceWatchCacheSize `json:"resources,omitempty" protobuf:"bytes,2,rep,name=resources"` +} + +// ResourceWatchCacheSize contains configuration of the API server's watch cache size for one specific resource. +type ResourceWatchCacheSize struct { + // APIGroup is the API group of the resource for which the watch cache size should be configured. + // An unset value is used to specify the legacy core API (e.g. for `secrets`). 
+ // +optional + APIGroup *string `json:"apiGroup,omitempty" protobuf:"bytes,1,opt,name=apiGroup"` + // Resource is the name of the resource for which the watch cache size should be configured + // (in lowercase plural form, e.g. `secrets`). + Resource string `json:"resource" protobuf:"bytes,2,opt,name=resource"` + // CacheSize specifies the watch cache size that should be configured for the specified resource. + CacheSize int32 `json:"size" protobuf:"varint,3,opt,name=size"` } // KubeControllerManagerConfig contains configuration settings for the kube-controller-manager. @@ -468,6 +604,9 @@ type KubeControllerManagerConfig struct { // NodeCIDRMaskSize defines the mask size for node cidr in cluster (default is 24) // +optional NodeCIDRMaskSize *int32 `json:"nodeCIDRMaskSize,omitempty" protobuf:"varint,3,opt,name=nodeCIDRMaskSize"` + // PodEvictionTimeout defines the grace period for deleting pods on failed nodes. Defaults to 2m. + // +optional + PodEvictionTimeout *metav1.Duration `json:"podEvictionTimeout,omitempty" protobuf:"bytes,4,opt,name=podEvictionTimeout"` } // HorizontalPodAutoscalerConfig contains horizontal pod autoscaler configuration settings for the kube-controller-manager. @@ -516,6 +655,12 @@ const ( // KubeSchedulerConfig contains configuration settings for the kube-scheduler. type KubeSchedulerConfig struct { KubernetesConfig `json:",inline" protobuf:"bytes,1,opt,name=kubernetesConfig"` + // KubeMaxPDVols allows to configure the `KUBE_MAX_PD_VOLS` environment variable for the kube-scheduler. + // Please find more information here: https://kubernetes.io/docs/concepts/storage/storage-limits/#custom-limits + // Note that using this field is considered alpha-/experimental-level and is on your own risk. You should be aware + // of all the side-effects and consequences when changing it. + // +optional + KubeMaxPDVols *string `json:"kubeMaxPDVols,omitempty" protobuf:"bytes,2,opt,name=kubeMaxPDVols"` } // KubeProxyConfig contains configuration settings for the kube-proxy. @@ -601,6 +746,18 @@ type KubeletConfig struct { // +optional // Default: 1m ImagePullProgressDeadline *metav1.Duration `json:"imagePullProgressDeadline,omitempty" protobuf:"bytes,12,opt,name=imagePullProgressDeadline"` + // FailSwapOn makes the Kubelet fail to start if swap is enabled on the node. (default true). + // +optional + FailSwapOn *bool `json:"failSwapOn,omitempty" protobuf:"varint,13,opt,name=failSwapOn"` + // KubeReserved is the configuration for resources reserved for kubernetes node components (mainly kubelet and container runtime). + // When updating these values, be aware that cgroup resizes may not succeed on active worker nodes. Look for the NodeAllocatableEnforced event to determine if the configuration was applied. + // +optional + // Default: cpu=80m,memory=1Gi,pid=20k + KubeReserved *KubeletConfigReserved `json:"kubeReserved,omitempty" protobuf:"bytes,14,opt,name=kubeReserved"` + // SystemReserved is the configuration for resources reserved for system processes not managed by kubernetes (e.g. journald). + // When updating these values, be aware that cgroup resizes may not succeed on active worker nodes. Look for the NodeAllocatableEnforced event to determine if the configuration was applied. + // +optional + SystemReserved *KubeletConfigReserved `json:"systemReserved,omitempty" protobuf:"bytes,15,opt,name=systemReserved"` } // KubeletConfigEviction contains kubelet eviction thresholds supporting either a resource.Quantity or a percentage based value. 
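Illustration (not part of the vendored sources): the hunks above introduce the KubeletConfigReserved, WatchCacheSizes and KubeAPIServerRequests types into the vendored v1beta1 Shoot API. The following minimal sketch shows how calling code could populate the new kubelet reservation and watch-cache fields; only the field names and documented defaults (cpu=80m, memory=1Gi, pid=20k, default watch cache size 100) come from the type definitions above, the concrete values are illustrative assumptions.

package main

import (
	"fmt"

	gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Reserved resources for kubelet and container runtime; the quantities mirror
	// the documented defaults and are purely illustrative.
	cpu := resource.MustParse("80m")
	memory := resource.MustParse("1Gi")
	pid := resource.MustParse("20k")

	kubelet := gardencorev1beta1.KubeletConfig{
		KubeReserved: &gardencorev1beta1.KubeletConfigReserved{
			CPU:    &cpu,
			Memory: &memory,
			PID:    &pid,
		},
	}

	// Watch cache configuration for the kube-apiserver; 100 is the documented flag
	// default, the per-resource size for secrets is an assumed example value.
	defaultCacheSize := int32(100)
	apiServer := gardencorev1beta1.KubeAPIServerConfig{
		WatchCacheSizes: &gardencorev1beta1.WatchCacheSizes{
			Default: &defaultCacheSize,
			Resources: []gardencorev1beta1.ResourceWatchCacheSize{
				{Resource: "secrets", CacheSize: 500},
			},
		},
	}

	fmt.Printf("kubeReserved: %+v\n", kubelet.KubeReserved)
	fmt.Printf("watchCacheSizes: %+v\n", apiServer.WatchCacheSizes)
}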
@@ -660,6 +817,23 @@ type KubeletConfigEvictionSoftGracePeriod struct { NodeFSInodesFree *metav1.Duration `json:"nodeFSInodesFree,omitempty" protobuf:"bytes,5,opt,name=nodeFSInodesFree"` } +// KubeletConfigReserved contains reserved resources for daemons +type KubeletConfigReserved struct { + // CPU is the reserved cpu. + // +optional + CPU *resource.Quantity `json:"cpu,omitempty" protobuf:"bytes,1,opt,name=cpu"` + // Memory is the reserved memory. + // +optional + Memory *resource.Quantity `json:"memory,omitempty" protobuf:"bytes,2,opt,name=memory"` + // EphemeralStorage is the reserved ephemeral-storage. + // +optional + EphemeralStorage *resource.Quantity `json:"ephemeralStorage,omitempty" protobuf:"bytes,3,opt,name=ephemeralStorage"` + // PID is the reserved process-ids. + // To reserve PID, the SupportNodePidsLimit feature gate must be enabled in Kubernetes versions < 1.15. + // +optional + PID *resource.Quantity `json:"pid,omitempty" protobuf:"bytes,4,opt,name=pid"` +} + ////////////////////////////////////////////////////////////////////////////////////////////////// // Networking relevant types // ////////////////////////////////////////////////////////////////////////////////////////////////// @@ -670,7 +844,7 @@ type Networking struct { Type string `json:"type" protobuf:"bytes,1,opt,name=type"` // ProviderConfig is the configuration passed to network resource. // +optional - ProviderConfig *ProviderConfig `json:"providerConfig,omitempty" protobuf:"bytes,2,opt,name=providerConfig"` + ProviderConfig *runtime.RawExtension `json:"providerConfig,omitempty" protobuf:"bytes,2,opt,name=providerConfig"` // Pods is the CIDR of the pod network. // +optional Pods *string `json:"pods,omitempty" protobuf:"bytes,3,opt,name=pods"` @@ -693,6 +867,13 @@ const ( // Maintenance relevant types // ////////////////////////////////////////////////////////////////////////////////////////////////// +const ( + // MaintenanceTimeWindowDurationMinimum is the minimum duration for a maintenance time window. + MaintenanceTimeWindowDurationMinimum = 30 * time.Minute + // MaintenanceTimeWindowDurationMaximum is the maximum duration for a maintenance time window. + MaintenanceTimeWindowDurationMaximum = 6 * time.Hour +) + // Maintenance contains information about the time window for maintenance operations and which // operations should be performed. type Maintenance struct { @@ -702,6 +883,11 @@ type Maintenance struct { // TimeWindow contains information about the time window for maintenance operations. // +optional TimeWindow *MaintenanceTimeWindow `json:"timeWindow,omitempty" protobuf:"bytes,2,opt,name=timeWindow"` + // ConfineSpecUpdateRollout prevents that changes/updates to the shoot specification will be rolled out immediately. + // Instead, they are rolled out during the shoot's maintenance time window. There is one exception that will trigger + // an immediate roll out which is changes to the Spec.Hibernation.Enabled field. + // +optional + ConfineSpecUpdateRollout *bool `json:"confineSpecUpdateRollout,omitempty" protobuf:"varint,3,opt,name=confineSpecUpdateRollout"` } // MaintenanceAutoUpdate contains information about which constraints should be automatically updated. @@ -752,11 +938,11 @@ type Provider struct { // ControlPlaneConfig contains the provider-specific control plane config blob. Please look up the concrete // definition in the documentation of your provider extension. 
// +optional - ControlPlaneConfig *ProviderConfig `json:"controlPlaneConfig,omitempty" protobuf:"bytes,2,opt,name=controlPlaneConfig"` + ControlPlaneConfig *runtime.RawExtension `json:"controlPlaneConfig,omitempty" protobuf:"bytes,2,opt,name=controlPlaneConfig"` // InfrastructureConfig contains the provider-specific infrastructure config blob. Please look up the concrete // definition in the documentation of your provider extension. // +optional - InfrastructureConfig *ProviderConfig `json:"infrastructureConfig,omitempty" protobuf:"bytes,3,opt,name=infrastructureConfig"` + InfrastructureConfig *runtime.RawExtension `json:"infrastructureConfig,omitempty" protobuf:"bytes,3,opt,name=infrastructureConfig"` // Workers is a list of worker groups. // +patchMergeKey=name // +patchStrategy=merge @@ -796,7 +982,7 @@ type Worker struct { MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,11,opt,name=maxUnavailable"` // ProviderConfig is the provider-specific configuration for this worker pool. // +optional - ProviderConfig *ProviderConfig `json:"providerConfig,omitempty" protobuf:"bytes,12,opt,name=providerConfig"` + ProviderConfig *runtime.RawExtension `json:"providerConfig,omitempty" protobuf:"bytes,12,opt,name=providerConfig"` // Taints is a list of taints for all the `Node` objects in this worker pool. // +optional Taints []corev1.Taint `json:"taints,omitempty" protobuf:"bytes,13,rep,name=taints"` @@ -805,7 +991,7 @@ type Worker struct { Volume *Volume `json:"volume,omitempty" protobuf:"bytes,14,opt,name=volume"` // DataVolumes contains a list of additional worker volumes. // +optional - DataVolumes []Volume `json:"dataVolumes,omitempty" protobuf:"bytes,15,rep,name=dataVolumes"` + DataVolumes []DataVolume `json:"dataVolumes,omitempty" protobuf:"bytes,15,rep,name=dataVolumes"` // KubeletDataVolumeName contains the name of a dataVolume that should be used for storing kubelet state. // +optional KubeletDataVolumeName *string `json:"kubeletDataVolumeName,omitempty" protobuf:"bytes,16,opt,name=kubeletDataVolumeName"` @@ -813,6 +999,37 @@ type Worker struct { // as not every provider may support availability zones. // +optional Zones []string `json:"zones,omitempty" protobuf:"bytes,17,rep,name=zones"` + // SystemComponents contains configuration for system components related to this worker pool + // +optional + SystemComponents *WorkerSystemComponents `json:"systemComponents,omitempty" protobuf:"bytes,18,opt,name=systemComponents"` + // MachineControllerManagerSettings contains configurations for different worker-pools. Eg. MachineDrainTimeout, MachineHealthTimeout. + // +optional + MachineControllerManagerSettings *MachineControllerManagerSettings `json:"machineControllerManager,omitempty" protobuf:"bytes,19,opt,name=machineControllerManager"` +} + +// MachineControllerManagerSettings contains configurations for different worker-pools. Eg. MachineDrainTimeout, MachineHealthTimeout. +type MachineControllerManagerSettings struct { + // MachineDrainTimeout is the period after which machine is forcefully deleted. + // +optional + MachineDrainTimeout *metav1.Duration `json:"machineDrainTimeout,omitempty" protobuf:"bytes,1,name=machineDrainTimeout"` + // MachineHealthTimeout is the period after which machine is declared failed. + // +optional + MachineHealthTimeout *metav1.Duration `json:"machineHealthTimeout,omitempty" protobuf:"bytes,2,name=machineHealthTimeout"` + // MachineCreationTimeout is the period after which creation of the machine is declared failed. 
+ // +optional + MachineCreationTimeout *metav1.Duration `json:"machineCreationTimeout,omitempty" protobuf:"bytes,3,name=machineCreationTimeout"` + // MaxEvictRetries are the number of eviction retries on a pod after which drain is declared failed, and forceful deletion is triggered. + // +optional + MaxEvictRetries *int32 `json:"maxEvictRetries,omitempty" protobuf:"bytes,4,name=maxEvictRetries"` + // NodeConditions are the set of conditions if set to true for the period of MachineHealthTimeout, machine will be declared failed. + // +optional + NodeConditions []string `json:"nodeConditions,omitempty" protobuf:"bytes,5,name=nodeConditions"` +} + +// WorkerSystemComponents contains configuration for system components related to this worker pool +type WorkerSystemComponents struct { + // Allow determines whether the pool should be allowed to host system components or not (defaults to true) + Allow bool `json:"allow" protobuf:"bytes,1,name=allow"` } // WorkerKubernetes contains configuration for Kubernetes components related to this worker pool. @@ -839,14 +1056,14 @@ type ShootMachineImage struct { Name string `json:"name" protobuf:"bytes,1,opt,name=name"` // ProviderConfig is the shoot's individual configuration passed to an extension resource. // +optional - ProviderConfig *ProviderConfig `json:"providerConfig,omitempty" protobuf:"bytes,2,opt,name=providerConfig"` + ProviderConfig *runtime.RawExtension `json:"providerConfig,omitempty" protobuf:"bytes,2,opt,name=providerConfig"` // Version is the version of the shoot's image. // If version is not provided, it will be defaulted to the latest version from the CloudProfile. // +optional Version *string `json:"version,omitempty" protobuf:"bytes,3,opt,name=version"` } -// Volume contains information about the volume type and size. +// Volume contains information about the volume type, size, and encryption. type Volume struct { // Name of the volume to make it referencable. // +optional @@ -861,6 +1078,20 @@ type Volume struct { Encrypted *bool `json:"encrypted,omitempty" protobuf:"varint,4,opt,name=encrypted"` } +// DataVolume contains information about a data volume. +type DataVolume struct { + // Name of the volume to make it referencable. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // Type is the type of the volume. + // +optional + Type *string `json:"type,omitempty" protobuf:"bytes,2,opt,name=type"` + // VolumeSize is the size of the volume. + VolumeSize string `json:"size" protobuf:"bytes,3,opt,name=size"` + // Encrypted determines if the volume should be encrypted. + // +optional + Encrypted *bool `json:"encrypted,omitempty" protobuf:"varint,4,opt,name=encrypted"` +} + // CRI contains information about the Container Runtimes. type CRI struct { // The name of the CRI library @@ -884,7 +1115,7 @@ type ContainerRuntime struct { // ProviderConfig is the configuration passed to container runtime resource. // +optional - ProviderConfig *ProviderConfig `json:"providerConfig,omitempty" protobuf:"bytes,2,opt,name=providerConfig"` + ProviderConfig *runtime.RawExtension `json:"providerConfig,omitempty" protobuf:"bytes,2,opt,name=providerConfig"` } var ( @@ -892,6 +1123,8 @@ var ( DefaultWorkerMaxSurge = intstr.FromInt(1) // DefaultWorkerMaxUnavailable is the default value for Worker MaxUnavailable. 
DefaultWorkerMaxUnavailable = intstr.FromInt(0) + // DefaultWorkerSystemComponentsAllow is the default value for Worker AllowSystemComponents + DefaultWorkerSystemComponentsAllow = true ) ////////////////////////////////////////////////////////////////////////////////////////////////// @@ -899,11 +1132,14 @@ var ( ////////////////////////////////////////////////////////////////////////////////////////////////// const ( - // ShootEventMaintenanceDone indicates that a maintenance operation has been performed. - ShootEventMaintenanceDone = "MaintenanceDone" - // ShootEventMaintenanceError indicates that a maintenance operation has failed. - ShootEventMaintenanceError = "MaintenanceError" - + // ShootEventImageVersionMaintenance indicates that a maintenance operation regarding the image version has been performed. + ShootEventImageVersionMaintenance = "MachineImageVersionMaintenance" + // ShootEventK8sVersionMaintenance indicates that a maintenance operation regarding the K8s version has been performed. + ShootEventK8sVersionMaintenance = "KubernetesVersionMaintenance" + // ShootEventHibernationEnabled indicates that hibernation started. + ShootEventHibernationEnabled = "Hibernated" + // ShootEventHibernationDisabled indicates that hibernation ended. + ShootEventHibernationDisabled = "WokenUp" // ShootEventSchedulingSuccessful indicates that a scheduling decision was taken successfully. ShootEventSchedulingSuccessful = "SchedulingSuccessful" // ShootEventSchedulingFailed indicates that a scheduling decision failed. @@ -921,6 +1157,9 @@ const ( ShootSystemComponentsHealthy ConditionType = "SystemComponentsHealthy" // ShootHibernationPossible is a constant for a condition type indicating whether the Shoot can be hibernated. ShootHibernationPossible ConditionType = "HibernationPossible" + // ShootMaintenancePreconditionsSatisfied is a constant for a condition type indicating whether all preconditions + // for a shoot maintenance operation are satisfied. + ShootMaintenancePreconditionsSatisfied ConditionType = "MaintenancePreconditionsSatisfied" ) // ShootPurpose is a type alias for string. diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_utils.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_utils.go index dedd8b4ae..63dc3d584 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_utils.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_utils.go @@ -16,7 +16,6 @@ package v1beta1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" ) const ( @@ -26,22 +25,6 @@ const ( EventSchedulingFailed = "SchedulingFailed" ) -// ProviderConfig is a workaround for missing OpenAPI functions on runtime.RawExtension struct. -// https://github.com/kubernetes/kubernetes/issues/55890 -// https://github.com/kubernetes-sigs/cluster-api/issues/137 -type ProviderConfig struct { - runtime.RawExtension `json:",inline" protobuf:"bytes,1,opt,name=rawExtension"` -} - -// OpenAPISchemaType is used by the kube-openapi generator when constructing -// the OpenAPI spec of this type. -// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators -func (ProviderConfig) OpenAPISchemaType() []string { return []string{"object"} } - -// OpenAPISchemaFormat is used by the kube-openapi generator when constructing -// the OpenAPI spec of this type. -func (ProviderConfig) OpenAPISchemaFormat() string { return "" } - // ConditionStatus is the status of a condition. 
type ConditionStatus string @@ -62,6 +45,9 @@ type Condition struct { Reason string `json:"reason" protobuf:"bytes,5,opt,name=reason"` // A human readable message indicating details about the transition. Message string `json:"message" protobuf:"bytes,6,opt,name=message"` + // Well-defined error codes in case the condition reports a problem. + // +optional + Codes []ErrorCode `json:"codes,omitempty" protobuf:"bytes,7,rep,name=codes,casttype=ErrorCode"` } const ( @@ -77,4 +63,10 @@ const ( // ConditionCheckError is a constant for a reason in condition. ConditionCheckError = "ConditionCheckError" + // ManagedResourceMissingConditionError is a constant for a reason in a condition that indicates + // one or multiple missing conditions in the observed managed resource. + ManagedResourceMissingConditionError = "MissingManagedResourceCondition" + // OutdatedStatusError is a constant for a reason in a condition that indicates + // that the observed generation in a status is outdated. + OutdatedStatusError = "OutdatedStatus" ) diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/zz_generated.conversion.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/zz_generated.conversion.go index 29e2326de..0924f52a9 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/zz_generated.conversion.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/zz_generated.conversion.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
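Illustration (not part of the vendored sources): the types_utils.go hunk above removes the ProviderConfig wrapper type, so the providerConfig-style fields in this vendored API now take *runtime.RawExtension directly, as the regenerated conversion functions below reflect. A minimal sketch of what that means for calling code; the extension apiVersion/kind inside the raw payload are made-up placeholders.

package main

import (
	"fmt"

	gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	// Before this update the field was a *ProviderConfig (a thin wrapper around
	// runtime.RawExtension); now the RawExtension is embedded directly.
	raw := []byte(`{"apiVersion":"example.provider.extensions.gardener.cloud/v1alpha1","kind":"InfrastructureConfig"}`)

	provider := gardencorev1beta1.Provider{
		Type:                 "example",
		InfrastructureConfig: &runtime.RawExtension{Raw: raw},
	}

	fmt.Println(string(provider.InfrastructureConfig.Raw))
}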
@@ -411,6 +411,16 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddGeneratedConversionFunc((*DataVolume)(nil), (*core.DataVolume)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_DataVolume_To_core_DataVolume(a.(*DataVolume), b.(*core.DataVolume), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.DataVolume)(nil), (*DataVolume)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_DataVolume_To_v1beta1_DataVolume(a.(*core.DataVolume), b.(*DataVolume), scope) + }); err != nil { + return err + } if err := s.AddGeneratedConversionFunc((*Endpoint)(nil), (*core.Endpoint)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_Endpoint_To_core_Endpoint(a.(*Endpoint), b.(*core.Endpoint), scope) }); err != nil { @@ -481,6 +491,26 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddGeneratedConversionFunc((*Ingress)(nil), (*core.Ingress)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_Ingress_To_core_Ingress(a.(*Ingress), b.(*core.Ingress), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.Ingress)(nil), (*Ingress)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_Ingress_To_v1beta1_Ingress(a.(*core.Ingress), b.(*Ingress), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*IngressController)(nil), (*core.IngressController)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_IngressController_To_core_IngressController(a.(*IngressController), b.(*core.IngressController), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.IngressController)(nil), (*IngressController)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_IngressController_To_v1beta1_IngressController(a.(*core.IngressController), b.(*IngressController), scope) + }); err != nil { + return err + } if err := s.AddGeneratedConversionFunc((*KubeAPIServerConfig)(nil), (*core.KubeAPIServerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_KubeAPIServerConfig_To_core_KubeAPIServerConfig(a.(*KubeAPIServerConfig), b.(*core.KubeAPIServerConfig), scope) }); err != nil { @@ -491,6 +521,16 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddGeneratedConversionFunc((*KubeAPIServerRequests)(nil), (*core.KubeAPIServerRequests)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_KubeAPIServerRequests_To_core_KubeAPIServerRequests(a.(*KubeAPIServerRequests), b.(*core.KubeAPIServerRequests), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.KubeAPIServerRequests)(nil), (*KubeAPIServerRequests)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_KubeAPIServerRequests_To_v1beta1_KubeAPIServerRequests(a.(*core.KubeAPIServerRequests), b.(*KubeAPIServerRequests), scope) + }); err != nil { + return err + } if err := s.AddGeneratedConversionFunc((*KubeControllerManagerConfig)(nil), (*core.KubeControllerManagerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { return 
Convert_v1beta1_KubeControllerManagerConfig_To_core_KubeControllerManagerConfig(a.(*KubeControllerManagerConfig), b.(*core.KubeControllerManagerConfig), scope) }); err != nil { @@ -561,6 +601,16 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddGeneratedConversionFunc((*KubeletConfigReserved)(nil), (*core.KubeletConfigReserved)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_KubeletConfigReserved_To_core_KubeletConfigReserved(a.(*KubeletConfigReserved), b.(*core.KubeletConfigReserved), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.KubeletConfigReserved)(nil), (*KubeletConfigReserved)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_KubeletConfigReserved_To_v1beta1_KubeletConfigReserved(a.(*core.KubeletConfigReserved), b.(*KubeletConfigReserved), scope) + }); err != nil { + return err + } if err := s.AddGeneratedConversionFunc((*Kubernetes)(nil), (*core.Kubernetes)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_Kubernetes_To_core_Kubernetes(a.(*Kubernetes), b.(*core.Kubernetes), scope) }); err != nil { @@ -641,6 +691,16 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddGeneratedConversionFunc((*MachineControllerManagerSettings)(nil), (*core.MachineControllerManagerSettings)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_MachineControllerManagerSettings_To_core_MachineControllerManagerSettings(a.(*MachineControllerManagerSettings), b.(*core.MachineControllerManagerSettings), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.MachineControllerManagerSettings)(nil), (*MachineControllerManagerSettings)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_MachineControllerManagerSettings_To_v1beta1_MachineControllerManagerSettings(a.(*core.MachineControllerManagerSettings), b.(*MachineControllerManagerSettings), scope) + }); err != nil { + return err + } if err := s.AddGeneratedConversionFunc((*MachineImage)(nil), (*core.MachineImage)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_MachineImage_To_core_MachineImage(a.(*MachineImage), b.(*core.MachineImage), scope) }); err != nil { @@ -651,6 +711,16 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddGeneratedConversionFunc((*MachineImageVersion)(nil), (*core.MachineImageVersion)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_MachineImageVersion_To_core_MachineImageVersion(a.(*MachineImageVersion), b.(*core.MachineImageVersion), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.MachineImageVersion)(nil), (*MachineImageVersion)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_MachineImageVersion_To_v1beta1_MachineImageVersion(a.(*core.MachineImageVersion), b.(*MachineImageVersion), scope) + }); err != nil { + return err + } if err := s.AddGeneratedConversionFunc((*MachineType)(nil), (*core.MachineType)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_MachineType_To_core_MachineType(a.(*MachineType), b.(*core.MachineType), scope) }); err != nil { @@ -711,6 +781,16 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err 
:= s.AddGeneratedConversionFunc((*NamedResourceReference)(nil), (*core.NamedResourceReference)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_NamedResourceReference_To_core_NamedResourceReference(a.(*NamedResourceReference), b.(*core.NamedResourceReference), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.NamedResourceReference)(nil), (*NamedResourceReference)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_NamedResourceReference_To_v1beta1_NamedResourceReference(a.(*core.NamedResourceReference), b.(*NamedResourceReference), scope) + }); err != nil { + return err + } if err := s.AddGeneratedConversionFunc((*Networking)(nil), (*core.Networking)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_Networking_To_core_Networking(a.(*Networking), b.(*core.Networking), scope) }); err != nil { @@ -811,26 +891,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*ProjectMember)(nil), (*core.ProjectMember)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_ProjectMember_To_core_ProjectMember(a.(*ProjectMember), b.(*core.ProjectMember), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*core.ProjectMember)(nil), (*ProjectMember)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_ProjectMember_To_v1beta1_ProjectMember(a.(*core.ProjectMember), b.(*ProjectMember), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*ProjectSpec)(nil), (*core.ProjectSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_ProjectSpec_To_core_ProjectSpec(a.(*ProjectSpec), b.(*core.ProjectSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*core.ProjectSpec)(nil), (*ProjectSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_ProjectSpec_To_v1beta1_ProjectSpec(a.(*core.ProjectSpec), b.(*ProjectSpec), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*ProjectStatus)(nil), (*core.ProjectStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_ProjectStatus_To_core_ProjectStatus(a.(*ProjectStatus), b.(*core.ProjectStatus), scope) }); err != nil { @@ -841,23 +901,23 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*Provider)(nil), (*core.Provider)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_Provider_To_core_Provider(a.(*Provider), b.(*core.Provider), scope) + if err := s.AddGeneratedConversionFunc((*ProjectTolerations)(nil), (*core.ProjectTolerations)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ProjectTolerations_To_core_ProjectTolerations(a.(*ProjectTolerations), b.(*core.ProjectTolerations), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.Provider)(nil), (*Provider)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_Provider_To_v1beta1_Provider(a.(*core.Provider), b.(*Provider), scope) + if err := s.AddGeneratedConversionFunc((*core.ProjectTolerations)(nil), (*ProjectTolerations)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_core_ProjectTolerations_To_v1beta1_ProjectTolerations(a.(*core.ProjectTolerations), b.(*ProjectTolerations), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*ProviderConfig)(nil), (*core.ProviderConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_ProviderConfig_To_core_ProviderConfig(a.(*ProviderConfig), b.(*core.ProviderConfig), scope) + if err := s.AddGeneratedConversionFunc((*Provider)(nil), (*core.Provider)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_Provider_To_core_Provider(a.(*Provider), b.(*core.Provider), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*core.ProviderConfig)(nil), (*ProviderConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_ProviderConfig_To_v1beta1_ProviderConfig(a.(*core.ProviderConfig), b.(*ProviderConfig), scope) + if err := s.AddGeneratedConversionFunc((*core.Provider)(nil), (*Provider)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_Provider_To_v1beta1_Provider(a.(*core.Provider), b.(*Provider), scope) }); err != nil { return err } @@ -901,6 +961,16 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddGeneratedConversionFunc((*ResourceWatchCacheSize)(nil), (*core.ResourceWatchCacheSize)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ResourceWatchCacheSize_To_core_ResourceWatchCacheSize(a.(*ResourceWatchCacheSize), b.(*core.ResourceWatchCacheSize), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.ResourceWatchCacheSize)(nil), (*ResourceWatchCacheSize)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ResourceWatchCacheSize_To_v1beta1_ResourceWatchCacheSize(a.(*core.ResourceWatchCacheSize), b.(*ResourceWatchCacheSize), scope) + }); err != nil { + return err + } if err := s.AddGeneratedConversionFunc((*SecretBinding)(nil), (*core.SecretBinding)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_SecretBinding_To_core_SecretBinding(a.(*SecretBinding), b.(*core.SecretBinding), scope) }); err != nil { @@ -951,6 +1021,16 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddGeneratedConversionFunc((*SeedDNSProvider)(nil), (*core.SeedDNSProvider)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_SeedDNSProvider_To_core_SeedDNSProvider(a.(*SeedDNSProvider), b.(*core.SeedDNSProvider), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.SeedDNSProvider)(nil), (*SeedDNSProvider)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_SeedDNSProvider_To_v1beta1_SeedDNSProvider(a.(*core.SeedDNSProvider), b.(*SeedDNSProvider), scope) + }); err != nil { + return err + } if err := s.AddGeneratedConversionFunc((*SeedList)(nil), (*core.SeedList)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_SeedList_To_core_SeedList(a.(*SeedList), b.(*core.SeedList), scope) }); err != nil { @@ -981,6 +1061,76 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddGeneratedConversionFunc((*SeedSelector)(nil), (*core.SeedSelector)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_v1beta1_SeedSelector_To_core_SeedSelector(a.(*SeedSelector), b.(*core.SeedSelector), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.SeedSelector)(nil), (*SeedSelector)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_SeedSelector_To_v1beta1_SeedSelector(a.(*core.SeedSelector), b.(*SeedSelector), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*SeedSettingExcessCapacityReservation)(nil), (*core.SeedSettingExcessCapacityReservation)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_SeedSettingExcessCapacityReservation_To_core_SeedSettingExcessCapacityReservation(a.(*SeedSettingExcessCapacityReservation), b.(*core.SeedSettingExcessCapacityReservation), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.SeedSettingExcessCapacityReservation)(nil), (*SeedSettingExcessCapacityReservation)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_SeedSettingExcessCapacityReservation_To_v1beta1_SeedSettingExcessCapacityReservation(a.(*core.SeedSettingExcessCapacityReservation), b.(*SeedSettingExcessCapacityReservation), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*SeedSettingLoadBalancerServices)(nil), (*core.SeedSettingLoadBalancerServices)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_SeedSettingLoadBalancerServices_To_core_SeedSettingLoadBalancerServices(a.(*SeedSettingLoadBalancerServices), b.(*core.SeedSettingLoadBalancerServices), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.SeedSettingLoadBalancerServices)(nil), (*SeedSettingLoadBalancerServices)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_SeedSettingLoadBalancerServices_To_v1beta1_SeedSettingLoadBalancerServices(a.(*core.SeedSettingLoadBalancerServices), b.(*SeedSettingLoadBalancerServices), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*SeedSettingScheduling)(nil), (*core.SeedSettingScheduling)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_SeedSettingScheduling_To_core_SeedSettingScheduling(a.(*SeedSettingScheduling), b.(*core.SeedSettingScheduling), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.SeedSettingScheduling)(nil), (*SeedSettingScheduling)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_SeedSettingScheduling_To_v1beta1_SeedSettingScheduling(a.(*core.SeedSettingScheduling), b.(*SeedSettingScheduling), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*SeedSettingShootDNS)(nil), (*core.SeedSettingShootDNS)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_SeedSettingShootDNS_To_core_SeedSettingShootDNS(a.(*SeedSettingShootDNS), b.(*core.SeedSettingShootDNS), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.SeedSettingShootDNS)(nil), (*SeedSettingShootDNS)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_SeedSettingShootDNS_To_v1beta1_SeedSettingShootDNS(a.(*core.SeedSettingShootDNS), b.(*SeedSettingShootDNS), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*SeedSettingVerticalPodAutoscaler)(nil), 
(*core.SeedSettingVerticalPodAutoscaler)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_SeedSettingVerticalPodAutoscaler_To_core_SeedSettingVerticalPodAutoscaler(a.(*SeedSettingVerticalPodAutoscaler), b.(*core.SeedSettingVerticalPodAutoscaler), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.SeedSettingVerticalPodAutoscaler)(nil), (*SeedSettingVerticalPodAutoscaler)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_SeedSettingVerticalPodAutoscaler_To_v1beta1_SeedSettingVerticalPodAutoscaler(a.(*core.SeedSettingVerticalPodAutoscaler), b.(*SeedSettingVerticalPodAutoscaler), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*SeedSettings)(nil), (*core.SeedSettings)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_SeedSettings_To_core_SeedSettings(a.(*SeedSettings), b.(*core.SeedSettings), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.SeedSettings)(nil), (*SeedSettings)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_SeedSettings_To_v1beta1_SeedSettings(a.(*core.SeedSettings), b.(*SeedSettings), scope) + }); err != nil { + return err + } if err := s.AddGeneratedConversionFunc((*SeedSpec)(nil), (*core.SeedSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_SeedSpec_To_core_SeedSpec(a.(*SeedSpec), b.(*core.SeedSpec), scope) }); err != nil { @@ -1011,6 +1161,16 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddGeneratedConversionFunc((*SeedTemplate)(nil), (*core.SeedTemplate)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_SeedTemplate_To_core_SeedTemplate(a.(*SeedTemplate), b.(*core.SeedTemplate), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.SeedTemplate)(nil), (*SeedTemplate)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_SeedTemplate_To_v1beta1_SeedTemplate(a.(*core.SeedTemplate), b.(*SeedTemplate), scope) + }); err != nil { + return err + } if err := s.AddGeneratedConversionFunc((*SeedVolume)(nil), (*core.SeedVolume)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_SeedVolume_To_core_SeedVolume(a.(*SeedVolume), b.(*core.SeedVolume), scope) }); err != nil { @@ -1101,6 +1261,26 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddGeneratedConversionFunc((*Toleration)(nil), (*core.Toleration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_Toleration_To_core_Toleration(a.(*Toleration), b.(*core.Toleration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.Toleration)(nil), (*Toleration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_Toleration_To_v1beta1_Toleration(a.(*core.Toleration), b.(*Toleration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*VerticalPodAutoscaler)(nil), (*core.VerticalPodAutoscaler)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_VerticalPodAutoscaler_To_core_VerticalPodAutoscaler(a.(*VerticalPodAutoscaler), b.(*core.VerticalPodAutoscaler), scope) + }); err != nil { + return err + } + if err := 
s.AddGeneratedConversionFunc((*core.VerticalPodAutoscaler)(nil), (*VerticalPodAutoscaler)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_VerticalPodAutoscaler_To_v1beta1_VerticalPodAutoscaler(a.(*core.VerticalPodAutoscaler), b.(*VerticalPodAutoscaler), scope) + }); err != nil { + return err + } if err := s.AddGeneratedConversionFunc((*Volume)(nil), (*core.Volume)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_Volume_To_core_Volume(a.(*Volume), b.(*core.Volume), scope) }); err != nil { @@ -1121,6 +1301,16 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddGeneratedConversionFunc((*WatchCacheSizes)(nil), (*core.WatchCacheSizes)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_WatchCacheSizes_To_core_WatchCacheSizes(a.(*WatchCacheSizes), b.(*core.WatchCacheSizes), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.WatchCacheSizes)(nil), (*WatchCacheSizes)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_WatchCacheSizes_To_v1beta1_WatchCacheSizes(a.(*core.WatchCacheSizes), b.(*WatchCacheSizes), scope) + }); err != nil { + return err + } if err := s.AddGeneratedConversionFunc((*Worker)(nil), (*core.Worker)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_Worker_To_core_Worker(a.(*Worker), b.(*core.Worker), scope) }); err != nil { @@ -1141,6 +1331,16 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddGeneratedConversionFunc((*WorkerSystemComponents)(nil), (*core.WorkerSystemComponents)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_WorkerSystemComponents_To_core_WorkerSystemComponents(a.(*WorkerSystemComponents), b.(*core.WorkerSystemComponents), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.WorkerSystemComponents)(nil), (*WorkerSystemComponents)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_WorkerSystemComponents_To_v1beta1_WorkerSystemComponents(a.(*core.WorkerSystemComponents), b.(*WorkerSystemComponents), scope) + }); err != nil { + return err + } if err := s.AddConversionFunc((*core.ProjectMember)(nil), (*ProjectMember)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_core_ProjectMember_To_v1beta1_ProjectMember(a.(*core.ProjectMember), b.(*ProjectMember), scope) }); err != nil { @@ -1208,7 +1408,7 @@ func Convert_core_Addons_To_v1beta1_Addons(in *core.Addons, out *Addons, s conve func autoConvert_v1beta1_AdmissionPlugin_To_core_AdmissionPlugin(in *AdmissionPlugin, out *core.AdmissionPlugin, s conversion.Scope) error { out.Name = in.Name - out.Config = (*core.ProviderConfig)(unsafe.Pointer(in.Config)) + out.Config = (*runtime.RawExtension)(unsafe.Pointer(in.Config)) return nil } @@ -1219,7 +1419,7 @@ func Convert_v1beta1_AdmissionPlugin_To_core_AdmissionPlugin(in *AdmissionPlugin func autoConvert_core_AdmissionPlugin_To_v1beta1_AdmissionPlugin(in *core.AdmissionPlugin, out *AdmissionPlugin, s conversion.Scope) error { out.Name = in.Name - out.Config = (*ProviderConfig)(unsafe.Pointer(in.Config)) + out.Config = (*runtime.RawExtension)(unsafe.Pointer(in.Config)) return nil } @@ -1392,7 +1592,7 @@ func autoConvert_v1beta1_BackupBucketSpec_To_core_BackupBucketSpec(in *BackupBuc if err := 
Convert_v1beta1_BackupBucketProvider_To_core_BackupBucketProvider(&in.Provider, &out.Provider, s); err != nil { return err } - out.ProviderConfig = (*core.ProviderConfig)(unsafe.Pointer(in.ProviderConfig)) + out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig)) out.SecretRef = in.SecretRef out.SeedName = (*string)(unsafe.Pointer(in.SeedName)) return nil @@ -1407,7 +1607,7 @@ func autoConvert_core_BackupBucketSpec_To_v1beta1_BackupBucketSpec(in *core.Back if err := Convert_core_BackupBucketProvider_To_v1beta1_BackupBucketProvider(&in.Provider, &out.Provider, s); err != nil { return err } - out.ProviderConfig = (*ProviderConfig)(unsafe.Pointer(in.ProviderConfig)) + out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig)) out.SecretRef = in.SecretRef out.SeedName = (*string)(unsafe.Pointer(in.SeedName)) return nil @@ -1419,7 +1619,7 @@ func Convert_core_BackupBucketSpec_To_v1beta1_BackupBucketSpec(in *core.BackupBu } func autoConvert_v1beta1_BackupBucketStatus_To_core_BackupBucketStatus(in *BackupBucketStatus, out *core.BackupBucketStatus, s conversion.Scope) error { - out.ProviderStatus = (*core.ProviderConfig)(unsafe.Pointer(in.ProviderStatus)) + out.ProviderStatus = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderStatus)) out.LastOperation = (*core.LastOperation)(unsafe.Pointer(in.LastOperation)) out.LastError = (*core.LastError)(unsafe.Pointer(in.LastError)) out.ObservedGeneration = in.ObservedGeneration @@ -1433,7 +1633,7 @@ func Convert_v1beta1_BackupBucketStatus_To_core_BackupBucketStatus(in *BackupBuc } func autoConvert_core_BackupBucketStatus_To_v1beta1_BackupBucketStatus(in *core.BackupBucketStatus, out *BackupBucketStatus, s conversion.Scope) error { - out.ProviderStatus = (*ProviderConfig)(unsafe.Pointer(in.ProviderStatus)) + out.ProviderStatus = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderStatus)) out.LastOperation = (*LastOperation)(unsafe.Pointer(in.LastOperation)) out.LastError = (*LastError)(unsafe.Pointer(in.LastError)) out.ObservedGeneration = in.ObservedGeneration @@ -1645,9 +1845,9 @@ func autoConvert_v1beta1_CloudProfileSpec_To_core_CloudProfileSpec(in *CloudProf } out.MachineImages = *(*[]core.MachineImage)(unsafe.Pointer(&in.MachineImages)) out.MachineTypes = *(*[]core.MachineType)(unsafe.Pointer(&in.MachineTypes)) - out.ProviderConfig = (*core.ProviderConfig)(unsafe.Pointer(in.ProviderConfig)) + out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig)) out.Regions = *(*[]core.Region)(unsafe.Pointer(&in.Regions)) - out.SeedSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.SeedSelector)) + out.SeedSelector = (*core.SeedSelector)(unsafe.Pointer(in.SeedSelector)) out.Type = in.Type out.VolumeTypes = *(*[]core.VolumeType)(unsafe.Pointer(&in.VolumeTypes)) return nil @@ -1665,9 +1865,9 @@ func autoConvert_core_CloudProfileSpec_To_v1beta1_CloudProfileSpec(in *core.Clou } out.MachineImages = *(*[]MachineImage)(unsafe.Pointer(&in.MachineImages)) out.MachineTypes = *(*[]MachineType)(unsafe.Pointer(&in.MachineTypes)) - out.ProviderConfig = (*ProviderConfig)(unsafe.Pointer(in.ProviderConfig)) + out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig)) out.Regions = *(*[]Region)(unsafe.Pointer(&in.Regions)) - out.SeedSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.SeedSelector)) + out.SeedSelector = (*SeedSelector)(unsafe.Pointer(in.SeedSelector)) out.Type = in.Type out.VolumeTypes = *(*[]VolumeType)(unsafe.Pointer(&in.VolumeTypes)) return nil @@ -1745,6 +1945,7 @@ func 
autoConvert_v1beta1_Condition_To_core_Condition(in *Condition, out *core.Co out.LastUpdateTime = in.LastUpdateTime out.Reason = in.Reason out.Message = in.Message + out.Codes = *(*[]core.ErrorCode)(unsafe.Pointer(&in.Codes)) return nil } @@ -1760,6 +1961,7 @@ func autoConvert_core_Condition_To_v1beta1_Condition(in *core.Condition, out *Co out.LastUpdateTime = in.LastUpdateTime out.Reason = in.Reason out.Message = in.Message + out.Codes = *(*[]ErrorCode)(unsafe.Pointer(&in.Codes)) return nil } @@ -1770,7 +1972,7 @@ func Convert_core_Condition_To_v1beta1_Condition(in *core.Condition, out *Condit func autoConvert_v1beta1_ContainerRuntime_To_core_ContainerRuntime(in *ContainerRuntime, out *core.ContainerRuntime, s conversion.Scope) error { out.Type = in.Type - out.ProviderConfig = (*core.ProviderConfig)(unsafe.Pointer(in.ProviderConfig)) + out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig)) return nil } @@ -1781,7 +1983,7 @@ func Convert_v1beta1_ContainerRuntime_To_core_ContainerRuntime(in *ContainerRunt func autoConvert_core_ContainerRuntime_To_v1beta1_ContainerRuntime(in *core.ContainerRuntime, out *ContainerRuntime, s conversion.Scope) error { out.Type = in.Type - out.ProviderConfig = (*ProviderConfig)(unsafe.Pointer(in.ProviderConfig)) + out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig)) return nil } @@ -1792,7 +1994,9 @@ func Convert_core_ContainerRuntime_To_v1beta1_ContainerRuntime(in *core.Containe func autoConvert_v1beta1_ControllerDeployment_To_core_ControllerDeployment(in *ControllerDeployment, out *core.ControllerDeployment, s conversion.Scope) error { out.Type = in.Type - out.ProviderConfig = (*core.ProviderConfig)(unsafe.Pointer(in.ProviderConfig)) + out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig)) + out.Policy = (*core.ControllerDeploymentPolicy)(unsafe.Pointer(in.Policy)) + out.SeedSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.SeedSelector)) return nil } @@ -1803,7 +2007,9 @@ func Convert_v1beta1_ControllerDeployment_To_core_ControllerDeployment(in *Contr func autoConvert_core_ControllerDeployment_To_v1beta1_ControllerDeployment(in *core.ControllerDeployment, out *ControllerDeployment, s conversion.Scope) error { out.Type = in.Type - out.ProviderConfig = (*ProviderConfig)(unsafe.Pointer(in.ProviderConfig)) + out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig)) + out.Policy = (*ControllerDeploymentPolicy)(unsafe.Pointer(in.Policy)) + out.SeedSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.SeedSelector)) return nil } @@ -1890,7 +2096,7 @@ func Convert_core_ControllerInstallationSpec_To_v1beta1_ControllerInstallationSp func autoConvert_v1beta1_ControllerInstallationStatus_To_core_ControllerInstallationStatus(in *ControllerInstallationStatus, out *core.ControllerInstallationStatus, s conversion.Scope) error { out.Conditions = *(*[]core.Condition)(unsafe.Pointer(&in.Conditions)) - out.ProviderStatus = (*core.ProviderConfig)(unsafe.Pointer(in.ProviderStatus)) + out.ProviderStatus = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderStatus)) return nil } @@ -1901,7 +2107,7 @@ func Convert_v1beta1_ControllerInstallationStatus_To_core_ControllerInstallation func autoConvert_core_ControllerInstallationStatus_To_v1beta1_ControllerInstallationStatus(in *core.ControllerInstallationStatus, out *ControllerInstallationStatus, s conversion.Scope) error { out.Conditions = *(*[]Condition)(unsafe.Pointer(&in.Conditions)) - out.ProviderStatus = 
(*ProviderConfig)(unsafe.Pointer(in.ProviderStatus)) + out.ProviderStatus = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderStatus)) return nil } @@ -1985,6 +2191,7 @@ func autoConvert_v1beta1_ControllerResource_To_core_ControllerResource(in *Contr out.Type = in.Type out.GloballyEnabled = (*bool)(unsafe.Pointer(in.GloballyEnabled)) out.ReconcileTimeout = (*metav1.Duration)(unsafe.Pointer(in.ReconcileTimeout)) + out.Primary = (*bool)(unsafe.Pointer(in.Primary)) return nil } @@ -1998,6 +2205,7 @@ func autoConvert_core_ControllerResource_To_v1beta1_ControllerResource(in *core. out.Type = in.Type out.GloballyEnabled = (*bool)(unsafe.Pointer(in.GloballyEnabled)) out.ReconcileTimeout = (*metav1.Duration)(unsafe.Pointer(in.ReconcileTimeout)) + out.Primary = (*bool)(unsafe.Pointer(in.Primary)) return nil } @@ -2078,6 +2286,32 @@ func Convert_core_DNSProvider_To_v1beta1_DNSProvider(in *core.DNSProvider, out * return autoConvert_core_DNSProvider_To_v1beta1_DNSProvider(in, out, s) } +func autoConvert_v1beta1_DataVolume_To_core_DataVolume(in *DataVolume, out *core.DataVolume, s conversion.Scope) error { + out.Name = in.Name + out.Type = (*string)(unsafe.Pointer(in.Type)) + out.VolumeSize = in.VolumeSize + out.Encrypted = (*bool)(unsafe.Pointer(in.Encrypted)) + return nil +} + +// Convert_v1beta1_DataVolume_To_core_DataVolume is an autogenerated conversion function. +func Convert_v1beta1_DataVolume_To_core_DataVolume(in *DataVolume, out *core.DataVolume, s conversion.Scope) error { + return autoConvert_v1beta1_DataVolume_To_core_DataVolume(in, out, s) +} + +func autoConvert_core_DataVolume_To_v1beta1_DataVolume(in *core.DataVolume, out *DataVolume, s conversion.Scope) error { + out.Name = in.Name + out.Type = (*string)(unsafe.Pointer(in.Type)) + out.VolumeSize = in.VolumeSize + out.Encrypted = (*bool)(unsafe.Pointer(in.Encrypted)) + return nil +} + +// Convert_core_DataVolume_To_v1beta1_DataVolume is an autogenerated conversion function. 
+func Convert_core_DataVolume_To_v1beta1_DataVolume(in *core.DataVolume, out *DataVolume, s conversion.Scope) error { + return autoConvert_core_DataVolume_To_v1beta1_DataVolume(in, out, s) +} + func autoConvert_v1beta1_Endpoint_To_core_Endpoint(in *Endpoint, out *core.Endpoint, s conversion.Scope) error { out.Name = in.Name out.URL = in.URL @@ -2128,7 +2362,8 @@ func Convert_core_ExpirableVersion_To_v1beta1_ExpirableVersion(in *core.Expirabl func autoConvert_v1beta1_Extension_To_core_Extension(in *Extension, out *core.Extension, s conversion.Scope) error { out.Type = in.Type - out.ProviderConfig = (*core.ProviderConfig)(unsafe.Pointer(in.ProviderConfig)) + out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig)) + out.Disabled = (*bool)(unsafe.Pointer(in.Disabled)) return nil } @@ -2139,7 +2374,8 @@ func Convert_v1beta1_Extension_To_core_Extension(in *Extension, out *core.Extens func autoConvert_core_Extension_To_v1beta1_Extension(in *core.Extension, out *Extension, s conversion.Scope) error { out.Type = in.Type - out.ProviderConfig = (*ProviderConfig)(unsafe.Pointer(in.ProviderConfig)) + out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig)) + out.Disabled = (*bool)(unsafe.Pointer(in.Disabled)) return nil } @@ -2250,6 +2486,54 @@ func Convert_core_HorizontalPodAutoscalerConfig_To_v1beta1_HorizontalPodAutoscal return autoConvert_core_HorizontalPodAutoscalerConfig_To_v1beta1_HorizontalPodAutoscalerConfig(in, out, s) } +func autoConvert_v1beta1_Ingress_To_core_Ingress(in *Ingress, out *core.Ingress, s conversion.Scope) error { + out.Domain = in.Domain + if err := Convert_v1beta1_IngressController_To_core_IngressController(&in.Controller, &out.Controller, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta1_Ingress_To_core_Ingress is an autogenerated conversion function. +func Convert_v1beta1_Ingress_To_core_Ingress(in *Ingress, out *core.Ingress, s conversion.Scope) error { + return autoConvert_v1beta1_Ingress_To_core_Ingress(in, out, s) +} + +func autoConvert_core_Ingress_To_v1beta1_Ingress(in *core.Ingress, out *Ingress, s conversion.Scope) error { + out.Domain = in.Domain + if err := Convert_core_IngressController_To_v1beta1_IngressController(&in.Controller, &out.Controller, s); err != nil { + return err + } + return nil +} + +// Convert_core_Ingress_To_v1beta1_Ingress is an autogenerated conversion function. +func Convert_core_Ingress_To_v1beta1_Ingress(in *core.Ingress, out *Ingress, s conversion.Scope) error { + return autoConvert_core_Ingress_To_v1beta1_Ingress(in, out, s) +} + +func autoConvert_v1beta1_IngressController_To_core_IngressController(in *IngressController, out *core.IngressController, s conversion.Scope) error { + out.Kind = in.Kind + out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig)) + return nil +} + +// Convert_v1beta1_IngressController_To_core_IngressController is an autogenerated conversion function. 
+func Convert_v1beta1_IngressController_To_core_IngressController(in *IngressController, out *core.IngressController, s conversion.Scope) error { + return autoConvert_v1beta1_IngressController_To_core_IngressController(in, out, s) +} + +func autoConvert_core_IngressController_To_v1beta1_IngressController(in *core.IngressController, out *IngressController, s conversion.Scope) error { + out.Kind = in.Kind + out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig)) + return nil +} + +// Convert_core_IngressController_To_v1beta1_IngressController is an autogenerated conversion function. +func Convert_core_IngressController_To_v1beta1_IngressController(in *core.IngressController, out *IngressController, s conversion.Scope) error { + return autoConvert_core_IngressController_To_v1beta1_IngressController(in, out, s) +} + func autoConvert_v1beta1_KubeAPIServerConfig_To_core_KubeAPIServerConfig(in *KubeAPIServerConfig, out *core.KubeAPIServerConfig, s conversion.Scope) error { if err := Convert_v1beta1_KubernetesConfig_To_core_KubernetesConfig(&in.KubernetesConfig, &out.KubernetesConfig, s); err != nil { return err @@ -2261,6 +2545,8 @@ func autoConvert_v1beta1_KubeAPIServerConfig_To_core_KubeAPIServerConfig(in *Kub out.OIDCConfig = (*core.OIDCConfig)(unsafe.Pointer(in.OIDCConfig)) out.RuntimeConfig = *(*map[string]bool)(unsafe.Pointer(&in.RuntimeConfig)) out.ServiceAccountConfig = (*core.ServiceAccountConfig)(unsafe.Pointer(in.ServiceAccountConfig)) + out.WatchCacheSizes = (*core.WatchCacheSizes)(unsafe.Pointer(in.WatchCacheSizes)) + out.Requests = (*core.KubeAPIServerRequests)(unsafe.Pointer(in.Requests)) return nil } @@ -2280,6 +2566,8 @@ func autoConvert_core_KubeAPIServerConfig_To_v1beta1_KubeAPIServerConfig(in *cor out.OIDCConfig = (*OIDCConfig)(unsafe.Pointer(in.OIDCConfig)) out.RuntimeConfig = *(*map[string]bool)(unsafe.Pointer(&in.RuntimeConfig)) out.ServiceAccountConfig = (*ServiceAccountConfig)(unsafe.Pointer(in.ServiceAccountConfig)) + out.WatchCacheSizes = (*WatchCacheSizes)(unsafe.Pointer(in.WatchCacheSizes)) + out.Requests = (*KubeAPIServerRequests)(unsafe.Pointer(in.Requests)) return nil } @@ -2288,12 +2576,35 @@ func Convert_core_KubeAPIServerConfig_To_v1beta1_KubeAPIServerConfig(in *core.Ku return autoConvert_core_KubeAPIServerConfig_To_v1beta1_KubeAPIServerConfig(in, out, s) } +func autoConvert_v1beta1_KubeAPIServerRequests_To_core_KubeAPIServerRequests(in *KubeAPIServerRequests, out *core.KubeAPIServerRequests, s conversion.Scope) error { + out.MaxNonMutatingInflight = (*int32)(unsafe.Pointer(in.MaxNonMutatingInflight)) + out.MaxMutatingInflight = (*int32)(unsafe.Pointer(in.MaxMutatingInflight)) + return nil +} + +// Convert_v1beta1_KubeAPIServerRequests_To_core_KubeAPIServerRequests is an autogenerated conversion function. +func Convert_v1beta1_KubeAPIServerRequests_To_core_KubeAPIServerRequests(in *KubeAPIServerRequests, out *core.KubeAPIServerRequests, s conversion.Scope) error { + return autoConvert_v1beta1_KubeAPIServerRequests_To_core_KubeAPIServerRequests(in, out, s) +} + +func autoConvert_core_KubeAPIServerRequests_To_v1beta1_KubeAPIServerRequests(in *core.KubeAPIServerRequests, out *KubeAPIServerRequests, s conversion.Scope) error { + out.MaxNonMutatingInflight = (*int32)(unsafe.Pointer(in.MaxNonMutatingInflight)) + out.MaxMutatingInflight = (*int32)(unsafe.Pointer(in.MaxMutatingInflight)) + return nil +} + +// Convert_core_KubeAPIServerRequests_To_v1beta1_KubeAPIServerRequests is an autogenerated conversion function. 
+func Convert_core_KubeAPIServerRequests_To_v1beta1_KubeAPIServerRequests(in *core.KubeAPIServerRequests, out *KubeAPIServerRequests, s conversion.Scope) error { + return autoConvert_core_KubeAPIServerRequests_To_v1beta1_KubeAPIServerRequests(in, out, s) +} + func autoConvert_v1beta1_KubeControllerManagerConfig_To_core_KubeControllerManagerConfig(in *KubeControllerManagerConfig, out *core.KubeControllerManagerConfig, s conversion.Scope) error { if err := Convert_v1beta1_KubernetesConfig_To_core_KubernetesConfig(&in.KubernetesConfig, &out.KubernetesConfig, s); err != nil { return err } out.HorizontalPodAutoscalerConfig = (*core.HorizontalPodAutoscalerConfig)(unsafe.Pointer(in.HorizontalPodAutoscalerConfig)) out.NodeCIDRMaskSize = (*int32)(unsafe.Pointer(in.NodeCIDRMaskSize)) + out.PodEvictionTimeout = (*metav1.Duration)(unsafe.Pointer(in.PodEvictionTimeout)) return nil } @@ -2308,6 +2619,7 @@ func autoConvert_core_KubeControllerManagerConfig_To_v1beta1_KubeControllerManag } out.HorizontalPodAutoscalerConfig = (*HorizontalPodAutoscalerConfig)(unsafe.Pointer(in.HorizontalPodAutoscalerConfig)) out.NodeCIDRMaskSize = (*int32)(unsafe.Pointer(in.NodeCIDRMaskSize)) + out.PodEvictionTimeout = (*metav1.Duration)(unsafe.Pointer(in.PodEvictionTimeout)) return nil } @@ -2346,6 +2658,7 @@ func autoConvert_v1beta1_KubeSchedulerConfig_To_core_KubeSchedulerConfig(in *Kub if err := Convert_v1beta1_KubernetesConfig_To_core_KubernetesConfig(&in.KubernetesConfig, &out.KubernetesConfig, s); err != nil { return err } + out.KubeMaxPDVols = (*string)(unsafe.Pointer(in.KubeMaxPDVols)) return nil } @@ -2358,6 +2671,7 @@ func autoConvert_core_KubeSchedulerConfig_To_v1beta1_KubeSchedulerConfig(in *cor if err := Convert_core_KubernetesConfig_To_v1beta1_KubernetesConfig(&in.KubernetesConfig, &out.KubernetesConfig, s); err != nil { return err } + out.KubeMaxPDVols = (*string)(unsafe.Pointer(in.KubeMaxPDVols)) return nil } @@ -2381,6 +2695,9 @@ func autoConvert_v1beta1_KubeletConfig_To_core_KubeletConfig(in *KubeletConfig, out.MaxPods = (*int32)(unsafe.Pointer(in.MaxPods)) out.PodPIDsLimit = (*int64)(unsafe.Pointer(in.PodPIDsLimit)) out.ImagePullProgressDeadline = (*metav1.Duration)(unsafe.Pointer(in.ImagePullProgressDeadline)) + out.FailSwapOn = (*bool)(unsafe.Pointer(in.FailSwapOn)) + out.KubeReserved = (*core.KubeletConfigReserved)(unsafe.Pointer(in.KubeReserved)) + out.SystemReserved = (*core.KubeletConfigReserved)(unsafe.Pointer(in.SystemReserved)) return nil } @@ -2404,6 +2721,9 @@ func autoConvert_core_KubeletConfig_To_v1beta1_KubeletConfig(in *core.KubeletCon out.MaxPods = (*int32)(unsafe.Pointer(in.MaxPods)) out.PodPIDsLimit = (*int64)(unsafe.Pointer(in.PodPIDsLimit)) out.ImagePullProgressDeadline = (*metav1.Duration)(unsafe.Pointer(in.ImagePullProgressDeadline)) + out.FailSwapOn = (*bool)(unsafe.Pointer(in.FailSwapOn)) + out.KubeReserved = (*KubeletConfigReserved)(unsafe.Pointer(in.KubeReserved)) + out.SystemReserved = (*KubeletConfigReserved)(unsafe.Pointer(in.SystemReserved)) return nil } @@ -2496,6 +2816,32 @@ func Convert_core_KubeletConfigEvictionSoftGracePeriod_To_v1beta1_KubeletConfigE return autoConvert_core_KubeletConfigEvictionSoftGracePeriod_To_v1beta1_KubeletConfigEvictionSoftGracePeriod(in, out, s) } +func autoConvert_v1beta1_KubeletConfigReserved_To_core_KubeletConfigReserved(in *KubeletConfigReserved, out *core.KubeletConfigReserved, s conversion.Scope) error { + out.CPU = (*resource.Quantity)(unsafe.Pointer(in.CPU)) + out.Memory = (*resource.Quantity)(unsafe.Pointer(in.Memory)) + 
out.EphemeralStorage = (*resource.Quantity)(unsafe.Pointer(in.EphemeralStorage)) + out.PID = (*resource.Quantity)(unsafe.Pointer(in.PID)) + return nil +} + +// Convert_v1beta1_KubeletConfigReserved_To_core_KubeletConfigReserved is an autogenerated conversion function. +func Convert_v1beta1_KubeletConfigReserved_To_core_KubeletConfigReserved(in *KubeletConfigReserved, out *core.KubeletConfigReserved, s conversion.Scope) error { + return autoConvert_v1beta1_KubeletConfigReserved_To_core_KubeletConfigReserved(in, out, s) +} + +func autoConvert_core_KubeletConfigReserved_To_v1beta1_KubeletConfigReserved(in *core.KubeletConfigReserved, out *KubeletConfigReserved, s conversion.Scope) error { + out.CPU = (*resource.Quantity)(unsafe.Pointer(in.CPU)) + out.Memory = (*resource.Quantity)(unsafe.Pointer(in.Memory)) + out.EphemeralStorage = (*resource.Quantity)(unsafe.Pointer(in.EphemeralStorage)) + out.PID = (*resource.Quantity)(unsafe.Pointer(in.PID)) + return nil +} + +// Convert_core_KubeletConfigReserved_To_v1beta1_KubeletConfigReserved is an autogenerated conversion function. +func Convert_core_KubeletConfigReserved_To_v1beta1_KubeletConfigReserved(in *core.KubeletConfigReserved, out *KubeletConfigReserved, s conversion.Scope) error { + return autoConvert_core_KubeletConfigReserved_To_v1beta1_KubeletConfigReserved(in, out, s) +} + func autoConvert_v1beta1_Kubernetes_To_core_Kubernetes(in *Kubernetes, out *core.Kubernetes, s conversion.Scope) error { out.AllowPrivilegedContainers = (*bool)(unsafe.Pointer(in.AllowPrivilegedContainers)) out.ClusterAutoscaler = (*core.ClusterAutoscaler)(unsafe.Pointer(in.ClusterAutoscaler)) @@ -2505,6 +2851,7 @@ func autoConvert_v1beta1_Kubernetes_To_core_Kubernetes(in *Kubernetes, out *core out.KubeProxy = (*core.KubeProxyConfig)(unsafe.Pointer(in.KubeProxy)) out.Kubelet = (*core.KubeletConfig)(unsafe.Pointer(in.Kubelet)) out.Version = in.Version + out.VerticalPodAutoscaler = (*core.VerticalPodAutoscaler)(unsafe.Pointer(in.VerticalPodAutoscaler)) return nil } @@ -2522,6 +2869,7 @@ func autoConvert_core_Kubernetes_To_v1beta1_Kubernetes(in *core.Kubernetes, out out.KubeProxy = (*KubeProxyConfig)(unsafe.Pointer(in.KubeProxy)) out.Kubelet = (*KubeletConfig)(unsafe.Pointer(in.Kubelet)) out.Version = in.Version + out.VerticalPodAutoscaler = (*VerticalPodAutoscaler)(unsafe.Pointer(in.VerticalPodAutoscaler)) return nil } @@ -2708,9 +3056,37 @@ func Convert_core_Machine_To_v1beta1_Machine(in *core.Machine, out *Machine, s c return autoConvert_core_Machine_To_v1beta1_Machine(in, out, s) } +func autoConvert_v1beta1_MachineControllerManagerSettings_To_core_MachineControllerManagerSettings(in *MachineControllerManagerSettings, out *core.MachineControllerManagerSettings, s conversion.Scope) error { + out.MachineDrainTimeout = (*metav1.Duration)(unsafe.Pointer(in.MachineDrainTimeout)) + out.MachineHealthTimeout = (*metav1.Duration)(unsafe.Pointer(in.MachineHealthTimeout)) + out.MachineCreationTimeout = (*metav1.Duration)(unsafe.Pointer(in.MachineCreationTimeout)) + out.MaxEvictRetries = (*int32)(unsafe.Pointer(in.MaxEvictRetries)) + out.NodeConditions = *(*[]string)(unsafe.Pointer(&in.NodeConditions)) + return nil +} + +// Convert_v1beta1_MachineControllerManagerSettings_To_core_MachineControllerManagerSettings is an autogenerated conversion function. 
+func Convert_v1beta1_MachineControllerManagerSettings_To_core_MachineControllerManagerSettings(in *MachineControllerManagerSettings, out *core.MachineControllerManagerSettings, s conversion.Scope) error { + return autoConvert_v1beta1_MachineControllerManagerSettings_To_core_MachineControllerManagerSettings(in, out, s) +} + +func autoConvert_core_MachineControllerManagerSettings_To_v1beta1_MachineControllerManagerSettings(in *core.MachineControllerManagerSettings, out *MachineControllerManagerSettings, s conversion.Scope) error { + out.MachineDrainTimeout = (*metav1.Duration)(unsafe.Pointer(in.MachineDrainTimeout)) + out.MachineHealthTimeout = (*metav1.Duration)(unsafe.Pointer(in.MachineHealthTimeout)) + out.MachineCreationTimeout = (*metav1.Duration)(unsafe.Pointer(in.MachineCreationTimeout)) + out.MaxEvictRetries = (*int32)(unsafe.Pointer(in.MaxEvictRetries)) + out.NodeConditions = *(*[]string)(unsafe.Pointer(&in.NodeConditions)) + return nil +} + +// Convert_core_MachineControllerManagerSettings_To_v1beta1_MachineControllerManagerSettings is an autogenerated conversion function. +func Convert_core_MachineControllerManagerSettings_To_v1beta1_MachineControllerManagerSettings(in *core.MachineControllerManagerSettings, out *MachineControllerManagerSettings, s conversion.Scope) error { + return autoConvert_core_MachineControllerManagerSettings_To_v1beta1_MachineControllerManagerSettings(in, out, s) +} + func autoConvert_v1beta1_MachineImage_To_core_MachineImage(in *MachineImage, out *core.MachineImage, s conversion.Scope) error { out.Name = in.Name - out.Versions = *(*[]core.ExpirableVersion)(unsafe.Pointer(&in.Versions)) + out.Versions = *(*[]core.MachineImageVersion)(unsafe.Pointer(&in.Versions)) return nil } @@ -2721,7 +3097,7 @@ func Convert_v1beta1_MachineImage_To_core_MachineImage(in *MachineImage, out *co func autoConvert_core_MachineImage_To_v1beta1_MachineImage(in *core.MachineImage, out *MachineImage, s conversion.Scope) error { out.Name = in.Name - out.Versions = *(*[]ExpirableVersion)(unsafe.Pointer(&in.Versions)) + out.Versions = *(*[]MachineImageVersion)(unsafe.Pointer(&in.Versions)) return nil } @@ -2730,6 +3106,32 @@ func Convert_core_MachineImage_To_v1beta1_MachineImage(in *core.MachineImage, ou return autoConvert_core_MachineImage_To_v1beta1_MachineImage(in, out, s) } +func autoConvert_v1beta1_MachineImageVersion_To_core_MachineImageVersion(in *MachineImageVersion, out *core.MachineImageVersion, s conversion.Scope) error { + if err := Convert_v1beta1_ExpirableVersion_To_core_ExpirableVersion(&in.ExpirableVersion, &out.ExpirableVersion, s); err != nil { + return err + } + out.CRI = *(*[]core.CRI)(unsafe.Pointer(&in.CRI)) + return nil +} + +// Convert_v1beta1_MachineImageVersion_To_core_MachineImageVersion is an autogenerated conversion function. +func Convert_v1beta1_MachineImageVersion_To_core_MachineImageVersion(in *MachineImageVersion, out *core.MachineImageVersion, s conversion.Scope) error { + return autoConvert_v1beta1_MachineImageVersion_To_core_MachineImageVersion(in, out, s) +} + +func autoConvert_core_MachineImageVersion_To_v1beta1_MachineImageVersion(in *core.MachineImageVersion, out *MachineImageVersion, s conversion.Scope) error { + if err := Convert_core_ExpirableVersion_To_v1beta1_ExpirableVersion(&in.ExpirableVersion, &out.ExpirableVersion, s); err != nil { + return err + } + out.CRI = *(*[]CRI)(unsafe.Pointer(&in.CRI)) + return nil +} + +// Convert_core_MachineImageVersion_To_v1beta1_MachineImageVersion is an autogenerated conversion function. 
+func Convert_core_MachineImageVersion_To_v1beta1_MachineImageVersion(in *core.MachineImageVersion, out *MachineImageVersion, s conversion.Scope) error { + return autoConvert_core_MachineImageVersion_To_v1beta1_MachineImageVersion(in, out, s) +} + func autoConvert_v1beta1_MachineType_To_core_MachineType(in *MachineType, out *core.MachineType, s conversion.Scope) error { out.CPU = in.CPU out.GPU = in.GPU @@ -2787,6 +3189,7 @@ func Convert_core_MachineTypeStorage_To_v1beta1_MachineTypeStorage(in *core.Mach func autoConvert_v1beta1_Maintenance_To_core_Maintenance(in *Maintenance, out *core.Maintenance, s conversion.Scope) error { out.AutoUpdate = (*core.MaintenanceAutoUpdate)(unsafe.Pointer(in.AutoUpdate)) out.TimeWindow = (*core.MaintenanceTimeWindow)(unsafe.Pointer(in.TimeWindow)) + out.ConfineSpecUpdateRollout = (*bool)(unsafe.Pointer(in.ConfineSpecUpdateRollout)) return nil } @@ -2798,6 +3201,7 @@ func Convert_v1beta1_Maintenance_To_core_Maintenance(in *Maintenance, out *core. func autoConvert_core_Maintenance_To_v1beta1_Maintenance(in *core.Maintenance, out *Maintenance, s conversion.Scope) error { out.AutoUpdate = (*MaintenanceAutoUpdate)(unsafe.Pointer(in.AutoUpdate)) out.TimeWindow = (*MaintenanceTimeWindow)(unsafe.Pointer(in.TimeWindow)) + out.ConfineSpecUpdateRollout = (*bool)(unsafe.Pointer(in.ConfineSpecUpdateRollout)) return nil } @@ -2870,9 +3274,31 @@ func Convert_core_Monitoring_To_v1beta1_Monitoring(in *core.Monitoring, out *Mon return autoConvert_core_Monitoring_To_v1beta1_Monitoring(in, out, s) } +func autoConvert_v1beta1_NamedResourceReference_To_core_NamedResourceReference(in *NamedResourceReference, out *core.NamedResourceReference, s conversion.Scope) error { + out.Name = in.Name + out.ResourceRef = in.ResourceRef + return nil +} + +// Convert_v1beta1_NamedResourceReference_To_core_NamedResourceReference is an autogenerated conversion function. +func Convert_v1beta1_NamedResourceReference_To_core_NamedResourceReference(in *NamedResourceReference, out *core.NamedResourceReference, s conversion.Scope) error { + return autoConvert_v1beta1_NamedResourceReference_To_core_NamedResourceReference(in, out, s) +} + +func autoConvert_core_NamedResourceReference_To_v1beta1_NamedResourceReference(in *core.NamedResourceReference, out *NamedResourceReference, s conversion.Scope) error { + out.Name = in.Name + out.ResourceRef = in.ResourceRef + return nil +} + +// Convert_core_NamedResourceReference_To_v1beta1_NamedResourceReference is an autogenerated conversion function. 
+func Convert_core_NamedResourceReference_To_v1beta1_NamedResourceReference(in *core.NamedResourceReference, out *NamedResourceReference, s conversion.Scope) error { + return autoConvert_core_NamedResourceReference_To_v1beta1_NamedResourceReference(in, out, s) +} + func autoConvert_v1beta1_Networking_To_core_Networking(in *Networking, out *core.Networking, s conversion.Scope) error { out.Type = in.Type - out.ProviderConfig = (*core.ProviderConfig)(unsafe.Pointer(in.ProviderConfig)) + out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig)) out.Pods = (*string)(unsafe.Pointer(in.Pods)) out.Nodes = (*string)(unsafe.Pointer(in.Nodes)) out.Services = (*string)(unsafe.Pointer(in.Services)) @@ -2886,7 +3312,7 @@ func Convert_v1beta1_Networking_To_core_Networking(in *Networking, out *core.Net func autoConvert_core_Networking_To_v1beta1_Networking(in *core.Networking, out *Networking, s conversion.Scope) error { out.Type = in.Type - out.ProviderConfig = (*ProviderConfig)(unsafe.Pointer(in.ProviderConfig)) + out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig)) out.Pods = (*string)(unsafe.Pointer(in.Pods)) out.Nodes = (*string)(unsafe.Pointer(in.Nodes)) out.Services = (*string)(unsafe.Pointer(in.Services)) @@ -3192,6 +3618,7 @@ func autoConvert_v1beta1_ProjectSpec_To_core_ProjectSpec(in *ProjectSpec, out *c out.Members = nil } out.Namespace = (*string)(unsafe.Pointer(in.Namespace)) + out.Tolerations = (*core.ProjectTolerations)(unsafe.Pointer(in.Tolerations)) return nil } @@ -3212,12 +3639,15 @@ func autoConvert_core_ProjectSpec_To_v1beta1_ProjectSpec(in *core.ProjectSpec, o out.Members = nil } out.Namespace = (*string)(unsafe.Pointer(in.Namespace)) + out.Tolerations = (*ProjectTolerations)(unsafe.Pointer(in.Tolerations)) return nil } func autoConvert_v1beta1_ProjectStatus_To_core_ProjectStatus(in *ProjectStatus, out *core.ProjectStatus, s conversion.Scope) error { out.ObservedGeneration = in.ObservedGeneration out.Phase = core.ProjectPhase(in.Phase) + out.StaleSinceTimestamp = (*metav1.Time)(unsafe.Pointer(in.StaleSinceTimestamp)) + out.StaleAutoDeleteTimestamp = (*metav1.Time)(unsafe.Pointer(in.StaleAutoDeleteTimestamp)) return nil } @@ -3229,6 +3659,8 @@ func Convert_v1beta1_ProjectStatus_To_core_ProjectStatus(in *ProjectStatus, out func autoConvert_core_ProjectStatus_To_v1beta1_ProjectStatus(in *core.ProjectStatus, out *ProjectStatus, s conversion.Scope) error { out.ObservedGeneration = in.ObservedGeneration out.Phase = ProjectPhase(in.Phase) + out.StaleSinceTimestamp = (*metav1.Time)(unsafe.Pointer(in.StaleSinceTimestamp)) + out.StaleAutoDeleteTimestamp = (*metav1.Time)(unsafe.Pointer(in.StaleAutoDeleteTimestamp)) return nil } @@ -3237,10 +3669,32 @@ func Convert_core_ProjectStatus_To_v1beta1_ProjectStatus(in *core.ProjectStatus, return autoConvert_core_ProjectStatus_To_v1beta1_ProjectStatus(in, out, s) } +func autoConvert_v1beta1_ProjectTolerations_To_core_ProjectTolerations(in *ProjectTolerations, out *core.ProjectTolerations, s conversion.Scope) error { + out.Defaults = *(*[]core.Toleration)(unsafe.Pointer(&in.Defaults)) + out.Whitelist = *(*[]core.Toleration)(unsafe.Pointer(&in.Whitelist)) + return nil +} + +// Convert_v1beta1_ProjectTolerations_To_core_ProjectTolerations is an autogenerated conversion function. 
+func Convert_v1beta1_ProjectTolerations_To_core_ProjectTolerations(in *ProjectTolerations, out *core.ProjectTolerations, s conversion.Scope) error { + return autoConvert_v1beta1_ProjectTolerations_To_core_ProjectTolerations(in, out, s) +} + +func autoConvert_core_ProjectTolerations_To_v1beta1_ProjectTolerations(in *core.ProjectTolerations, out *ProjectTolerations, s conversion.Scope) error { + out.Defaults = *(*[]Toleration)(unsafe.Pointer(&in.Defaults)) + out.Whitelist = *(*[]Toleration)(unsafe.Pointer(&in.Whitelist)) + return nil +} + +// Convert_core_ProjectTolerations_To_v1beta1_ProjectTolerations is an autogenerated conversion function. +func Convert_core_ProjectTolerations_To_v1beta1_ProjectTolerations(in *core.ProjectTolerations, out *ProjectTolerations, s conversion.Scope) error { + return autoConvert_core_ProjectTolerations_To_v1beta1_ProjectTolerations(in, out, s) +} + func autoConvert_v1beta1_Provider_To_core_Provider(in *Provider, out *core.Provider, s conversion.Scope) error { out.Type = in.Type - out.ControlPlaneConfig = (*core.ProviderConfig)(unsafe.Pointer(in.ControlPlaneConfig)) - out.InfrastructureConfig = (*core.ProviderConfig)(unsafe.Pointer(in.InfrastructureConfig)) + out.ControlPlaneConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ControlPlaneConfig)) + out.InfrastructureConfig = (*runtime.RawExtension)(unsafe.Pointer(in.InfrastructureConfig)) if in.Workers != nil { in, out := &in.Workers, &out.Workers *out = make([]core.Worker, len(*in)) @@ -3262,8 +3716,8 @@ func Convert_v1beta1_Provider_To_core_Provider(in *Provider, out *core.Provider, func autoConvert_core_Provider_To_v1beta1_Provider(in *core.Provider, out *Provider, s conversion.Scope) error { out.Type = in.Type - out.ControlPlaneConfig = (*ProviderConfig)(unsafe.Pointer(in.ControlPlaneConfig)) - out.InfrastructureConfig = (*ProviderConfig)(unsafe.Pointer(in.InfrastructureConfig)) + out.ControlPlaneConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ControlPlaneConfig)) + out.InfrastructureConfig = (*runtime.RawExtension)(unsafe.Pointer(in.InfrastructureConfig)) if in.Workers != nil { in, out := &in.Workers, &out.Workers *out = make([]Worker, len(*in)) @@ -3283,26 +3737,6 @@ func Convert_core_Provider_To_v1beta1_Provider(in *core.Provider, out *Provider, return autoConvert_core_Provider_To_v1beta1_Provider(in, out, s) } -func autoConvert_v1beta1_ProviderConfig_To_core_ProviderConfig(in *ProviderConfig, out *core.ProviderConfig, s conversion.Scope) error { - out.RawExtension = in.RawExtension - return nil -} - -// Convert_v1beta1_ProviderConfig_To_core_ProviderConfig is an autogenerated conversion function. -func Convert_v1beta1_ProviderConfig_To_core_ProviderConfig(in *ProviderConfig, out *core.ProviderConfig, s conversion.Scope) error { - return autoConvert_v1beta1_ProviderConfig_To_core_ProviderConfig(in, out, s) -} - -func autoConvert_core_ProviderConfig_To_v1beta1_ProviderConfig(in *core.ProviderConfig, out *ProviderConfig, s conversion.Scope) error { - out.RawExtension = in.RawExtension - return nil -} - -// Convert_core_ProviderConfig_To_v1beta1_ProviderConfig is an autogenerated conversion function. 
-func Convert_core_ProviderConfig_To_v1beta1_ProviderConfig(in *core.ProviderConfig, out *ProviderConfig, s conversion.Scope) error { - return autoConvert_core_ProviderConfig_To_v1beta1_ProviderConfig(in, out, s) -} - func autoConvert_v1beta1_Quota_To_core_Quota(in *Quota, out *core.Quota, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := Convert_v1beta1_QuotaSpec_To_core_QuotaSpec(&in.Spec, &out.Spec, s); err != nil { @@ -3378,6 +3812,7 @@ func Convert_core_QuotaSpec_To_v1beta1_QuotaSpec(in *core.QuotaSpec, out *QuotaS func autoConvert_v1beta1_Region_To_core_Region(in *Region, out *core.Region, s conversion.Scope) error { out.Name = in.Name out.Zones = *(*[]core.AvailabilityZone)(unsafe.Pointer(&in.Zones)) + out.Labels = *(*map[string]string)(unsafe.Pointer(&in.Labels)) return nil } @@ -3389,6 +3824,7 @@ func Convert_v1beta1_Region_To_core_Region(in *Region, out *core.Region, s conve func autoConvert_core_Region_To_v1beta1_Region(in *core.Region, out *Region, s conversion.Scope) error { out.Name = in.Name out.Zones = *(*[]AvailabilityZone)(unsafe.Pointer(&in.Zones)) + out.Labels = *(*map[string]string)(unsafe.Pointer(&in.Labels)) return nil } @@ -3397,6 +3833,30 @@ func Convert_core_Region_To_v1beta1_Region(in *core.Region, out *Region, s conve return autoConvert_core_Region_To_v1beta1_Region(in, out, s) } +func autoConvert_v1beta1_ResourceWatchCacheSize_To_core_ResourceWatchCacheSize(in *ResourceWatchCacheSize, out *core.ResourceWatchCacheSize, s conversion.Scope) error { + out.APIGroup = (*string)(unsafe.Pointer(in.APIGroup)) + out.Resource = in.Resource + out.CacheSize = in.CacheSize + return nil +} + +// Convert_v1beta1_ResourceWatchCacheSize_To_core_ResourceWatchCacheSize is an autogenerated conversion function. +func Convert_v1beta1_ResourceWatchCacheSize_To_core_ResourceWatchCacheSize(in *ResourceWatchCacheSize, out *core.ResourceWatchCacheSize, s conversion.Scope) error { + return autoConvert_v1beta1_ResourceWatchCacheSize_To_core_ResourceWatchCacheSize(in, out, s) +} + +func autoConvert_core_ResourceWatchCacheSize_To_v1beta1_ResourceWatchCacheSize(in *core.ResourceWatchCacheSize, out *ResourceWatchCacheSize, s conversion.Scope) error { + out.APIGroup = (*string)(unsafe.Pointer(in.APIGroup)) + out.Resource = in.Resource + out.CacheSize = in.CacheSize + return nil +} + +// Convert_core_ResourceWatchCacheSize_To_v1beta1_ResourceWatchCacheSize is an autogenerated conversion function. 
+func Convert_core_ResourceWatchCacheSize_To_v1beta1_ResourceWatchCacheSize(in *core.ResourceWatchCacheSize, out *ResourceWatchCacheSize, s conversion.Scope) error { + return autoConvert_core_ResourceWatchCacheSize_To_v1beta1_ResourceWatchCacheSize(in, out, s) +} + func autoConvert_v1beta1_SecretBinding_To_core_SecretBinding(in *SecretBinding, out *core.SecretBinding, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta out.SecretRef = in.SecretRef @@ -3477,7 +3937,7 @@ func Convert_core_Seed_To_v1beta1_Seed(in *core.Seed, out *Seed, s conversion.Sc func autoConvert_v1beta1_SeedBackup_To_core_SeedBackup(in *SeedBackup, out *core.SeedBackup, s conversion.Scope) error { out.Provider = in.Provider - out.ProviderConfig = (*core.ProviderConfig)(unsafe.Pointer(in.ProviderConfig)) + out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig)) out.Region = (*string)(unsafe.Pointer(in.Region)) out.SecretRef = in.SecretRef return nil @@ -3490,7 +3950,7 @@ func Convert_v1beta1_SeedBackup_To_core_SeedBackup(in *SeedBackup, out *core.See func autoConvert_core_SeedBackup_To_v1beta1_SeedBackup(in *core.SeedBackup, out *SeedBackup, s conversion.Scope) error { out.Provider = in.Provider - out.ProviderConfig = (*ProviderConfig)(unsafe.Pointer(in.ProviderConfig)) + out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig)) out.Region = (*string)(unsafe.Pointer(in.Region)) out.SecretRef = in.SecretRef return nil @@ -3502,7 +3962,8 @@ func Convert_core_SeedBackup_To_v1beta1_SeedBackup(in *core.SeedBackup, out *See } func autoConvert_v1beta1_SeedDNS_To_core_SeedDNS(in *SeedDNS, out *core.SeedDNS, s conversion.Scope) error { - out.IngressDomain = in.IngressDomain + out.IngressDomain = (*string)(unsafe.Pointer(in.IngressDomain)) + out.Provider = (*core.SeedDNSProvider)(unsafe.Pointer(in.Provider)) return nil } @@ -3512,7 +3973,8 @@ func Convert_v1beta1_SeedDNS_To_core_SeedDNS(in *SeedDNS, out *core.SeedDNS, s c } func autoConvert_core_SeedDNS_To_v1beta1_SeedDNS(in *core.SeedDNS, out *SeedDNS, s conversion.Scope) error { - out.IngressDomain = in.IngressDomain + out.IngressDomain = (*string)(unsafe.Pointer(in.IngressDomain)) + out.Provider = (*SeedDNSProvider)(unsafe.Pointer(in.Provider)) return nil } @@ -3521,9 +3983,45 @@ func Convert_core_SeedDNS_To_v1beta1_SeedDNS(in *core.SeedDNS, out *SeedDNS, s c return autoConvert_core_SeedDNS_To_v1beta1_SeedDNS(in, out, s) } +func autoConvert_v1beta1_SeedDNSProvider_To_core_SeedDNSProvider(in *SeedDNSProvider, out *core.SeedDNSProvider, s conversion.Scope) error { + out.Type = in.Type + out.SecretRef = in.SecretRef + out.Domains = (*core.DNSIncludeExclude)(unsafe.Pointer(in.Domains)) + out.Zones = (*core.DNSIncludeExclude)(unsafe.Pointer(in.Zones)) + return nil +} + +// Convert_v1beta1_SeedDNSProvider_To_core_SeedDNSProvider is an autogenerated conversion function. +func Convert_v1beta1_SeedDNSProvider_To_core_SeedDNSProvider(in *SeedDNSProvider, out *core.SeedDNSProvider, s conversion.Scope) error { + return autoConvert_v1beta1_SeedDNSProvider_To_core_SeedDNSProvider(in, out, s) +} + +func autoConvert_core_SeedDNSProvider_To_v1beta1_SeedDNSProvider(in *core.SeedDNSProvider, out *SeedDNSProvider, s conversion.Scope) error { + out.Type = in.Type + out.SecretRef = in.SecretRef + out.Domains = (*DNSIncludeExclude)(unsafe.Pointer(in.Domains)) + out.Zones = (*DNSIncludeExclude)(unsafe.Pointer(in.Zones)) + return nil +} + +// Convert_core_SeedDNSProvider_To_v1beta1_SeedDNSProvider is an autogenerated conversion function. 
+func Convert_core_SeedDNSProvider_To_v1beta1_SeedDNSProvider(in *core.SeedDNSProvider, out *SeedDNSProvider, s conversion.Scope) error { + return autoConvert_core_SeedDNSProvider_To_v1beta1_SeedDNSProvider(in, out, s) +} + func autoConvert_v1beta1_SeedList_To_core_SeedList(in *SeedList, out *core.SeedList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]core.Seed)(unsafe.Pointer(&in.Items)) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]core.Seed, len(*in)) + for i := range *in { + if err := Convert_v1beta1_Seed_To_core_Seed(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } return nil } @@ -3534,7 +4032,17 @@ func Convert_v1beta1_SeedList_To_core_SeedList(in *SeedList, out *core.SeedList, func autoConvert_core_SeedList_To_v1beta1_SeedList(in *core.SeedList, out *SeedList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]Seed)(unsafe.Pointer(&in.Items)) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Seed, len(*in)) + for i := range *in { + if err := Convert_core_Seed_To_v1beta1_Seed(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } return nil } @@ -3573,7 +4081,7 @@ func Convert_core_SeedNetworks_To_v1beta1_SeedNetworks(in *core.SeedNetworks, ou func autoConvert_v1beta1_SeedProvider_To_core_SeedProvider(in *SeedProvider, out *core.SeedProvider, s conversion.Scope) error { out.Type = in.Type - out.ProviderConfig = (*core.ProviderConfig)(unsafe.Pointer(in.ProviderConfig)) + out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig)) out.Region = in.Region return nil } @@ -3585,7 +4093,7 @@ func Convert_v1beta1_SeedProvider_To_core_SeedProvider(in *SeedProvider, out *co func autoConvert_core_SeedProvider_To_v1beta1_SeedProvider(in *core.SeedProvider, out *SeedProvider, s conversion.Scope) error { out.Type = in.Type - out.ProviderConfig = (*ProviderConfig)(unsafe.Pointer(in.ProviderConfig)) + out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig)) out.Region = in.Region return nil } @@ -3595,6 +4103,156 @@ func Convert_core_SeedProvider_To_v1beta1_SeedProvider(in *core.SeedProvider, ou return autoConvert_core_SeedProvider_To_v1beta1_SeedProvider(in, out, s) } +func autoConvert_v1beta1_SeedSelector_To_core_SeedSelector(in *SeedSelector, out *core.SeedSelector, s conversion.Scope) error { + out.LabelSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.LabelSelector)) + out.ProviderTypes = *(*[]string)(unsafe.Pointer(&in.ProviderTypes)) + return nil +} + +// Convert_v1beta1_SeedSelector_To_core_SeedSelector is an autogenerated conversion function. +func Convert_v1beta1_SeedSelector_To_core_SeedSelector(in *SeedSelector, out *core.SeedSelector, s conversion.Scope) error { + return autoConvert_v1beta1_SeedSelector_To_core_SeedSelector(in, out, s) +} + +func autoConvert_core_SeedSelector_To_v1beta1_SeedSelector(in *core.SeedSelector, out *SeedSelector, s conversion.Scope) error { + out.LabelSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.LabelSelector)) + out.ProviderTypes = *(*[]string)(unsafe.Pointer(&in.ProviderTypes)) + return nil +} + +// Convert_core_SeedSelector_To_v1beta1_SeedSelector is an autogenerated conversion function. 
+func Convert_core_SeedSelector_To_v1beta1_SeedSelector(in *core.SeedSelector, out *SeedSelector, s conversion.Scope) error { + return autoConvert_core_SeedSelector_To_v1beta1_SeedSelector(in, out, s) +} + +func autoConvert_v1beta1_SeedSettingExcessCapacityReservation_To_core_SeedSettingExcessCapacityReservation(in *SeedSettingExcessCapacityReservation, out *core.SeedSettingExcessCapacityReservation, s conversion.Scope) error { + out.Enabled = in.Enabled + return nil +} + +// Convert_v1beta1_SeedSettingExcessCapacityReservation_To_core_SeedSettingExcessCapacityReservation is an autogenerated conversion function. +func Convert_v1beta1_SeedSettingExcessCapacityReservation_To_core_SeedSettingExcessCapacityReservation(in *SeedSettingExcessCapacityReservation, out *core.SeedSettingExcessCapacityReservation, s conversion.Scope) error { + return autoConvert_v1beta1_SeedSettingExcessCapacityReservation_To_core_SeedSettingExcessCapacityReservation(in, out, s) +} + +func autoConvert_core_SeedSettingExcessCapacityReservation_To_v1beta1_SeedSettingExcessCapacityReservation(in *core.SeedSettingExcessCapacityReservation, out *SeedSettingExcessCapacityReservation, s conversion.Scope) error { + out.Enabled = in.Enabled + return nil +} + +// Convert_core_SeedSettingExcessCapacityReservation_To_v1beta1_SeedSettingExcessCapacityReservation is an autogenerated conversion function. +func Convert_core_SeedSettingExcessCapacityReservation_To_v1beta1_SeedSettingExcessCapacityReservation(in *core.SeedSettingExcessCapacityReservation, out *SeedSettingExcessCapacityReservation, s conversion.Scope) error { + return autoConvert_core_SeedSettingExcessCapacityReservation_To_v1beta1_SeedSettingExcessCapacityReservation(in, out, s) +} + +func autoConvert_v1beta1_SeedSettingLoadBalancerServices_To_core_SeedSettingLoadBalancerServices(in *SeedSettingLoadBalancerServices, out *core.SeedSettingLoadBalancerServices, s conversion.Scope) error { + out.Annotations = *(*map[string]string)(unsafe.Pointer(&in.Annotations)) + return nil +} + +// Convert_v1beta1_SeedSettingLoadBalancerServices_To_core_SeedSettingLoadBalancerServices is an autogenerated conversion function. +func Convert_v1beta1_SeedSettingLoadBalancerServices_To_core_SeedSettingLoadBalancerServices(in *SeedSettingLoadBalancerServices, out *core.SeedSettingLoadBalancerServices, s conversion.Scope) error { + return autoConvert_v1beta1_SeedSettingLoadBalancerServices_To_core_SeedSettingLoadBalancerServices(in, out, s) +} + +func autoConvert_core_SeedSettingLoadBalancerServices_To_v1beta1_SeedSettingLoadBalancerServices(in *core.SeedSettingLoadBalancerServices, out *SeedSettingLoadBalancerServices, s conversion.Scope) error { + out.Annotations = *(*map[string]string)(unsafe.Pointer(&in.Annotations)) + return nil +} + +// Convert_core_SeedSettingLoadBalancerServices_To_v1beta1_SeedSettingLoadBalancerServices is an autogenerated conversion function. 
+func Convert_core_SeedSettingLoadBalancerServices_To_v1beta1_SeedSettingLoadBalancerServices(in *core.SeedSettingLoadBalancerServices, out *SeedSettingLoadBalancerServices, s conversion.Scope) error { + return autoConvert_core_SeedSettingLoadBalancerServices_To_v1beta1_SeedSettingLoadBalancerServices(in, out, s) +} + +func autoConvert_v1beta1_SeedSettingScheduling_To_core_SeedSettingScheduling(in *SeedSettingScheduling, out *core.SeedSettingScheduling, s conversion.Scope) error { + out.Visible = in.Visible + return nil +} + +// Convert_v1beta1_SeedSettingScheduling_To_core_SeedSettingScheduling is an autogenerated conversion function. +func Convert_v1beta1_SeedSettingScheduling_To_core_SeedSettingScheduling(in *SeedSettingScheduling, out *core.SeedSettingScheduling, s conversion.Scope) error { + return autoConvert_v1beta1_SeedSettingScheduling_To_core_SeedSettingScheduling(in, out, s) +} + +func autoConvert_core_SeedSettingScheduling_To_v1beta1_SeedSettingScheduling(in *core.SeedSettingScheduling, out *SeedSettingScheduling, s conversion.Scope) error { + out.Visible = in.Visible + return nil +} + +// Convert_core_SeedSettingScheduling_To_v1beta1_SeedSettingScheduling is an autogenerated conversion function. +func Convert_core_SeedSettingScheduling_To_v1beta1_SeedSettingScheduling(in *core.SeedSettingScheduling, out *SeedSettingScheduling, s conversion.Scope) error { + return autoConvert_core_SeedSettingScheduling_To_v1beta1_SeedSettingScheduling(in, out, s) +} + +func autoConvert_v1beta1_SeedSettingShootDNS_To_core_SeedSettingShootDNS(in *SeedSettingShootDNS, out *core.SeedSettingShootDNS, s conversion.Scope) error { + out.Enabled = in.Enabled + return nil +} + +// Convert_v1beta1_SeedSettingShootDNS_To_core_SeedSettingShootDNS is an autogenerated conversion function. +func Convert_v1beta1_SeedSettingShootDNS_To_core_SeedSettingShootDNS(in *SeedSettingShootDNS, out *core.SeedSettingShootDNS, s conversion.Scope) error { + return autoConvert_v1beta1_SeedSettingShootDNS_To_core_SeedSettingShootDNS(in, out, s) +} + +func autoConvert_core_SeedSettingShootDNS_To_v1beta1_SeedSettingShootDNS(in *core.SeedSettingShootDNS, out *SeedSettingShootDNS, s conversion.Scope) error { + out.Enabled = in.Enabled + return nil +} + +// Convert_core_SeedSettingShootDNS_To_v1beta1_SeedSettingShootDNS is an autogenerated conversion function. +func Convert_core_SeedSettingShootDNS_To_v1beta1_SeedSettingShootDNS(in *core.SeedSettingShootDNS, out *SeedSettingShootDNS, s conversion.Scope) error { + return autoConvert_core_SeedSettingShootDNS_To_v1beta1_SeedSettingShootDNS(in, out, s) +} + +func autoConvert_v1beta1_SeedSettingVerticalPodAutoscaler_To_core_SeedSettingVerticalPodAutoscaler(in *SeedSettingVerticalPodAutoscaler, out *core.SeedSettingVerticalPodAutoscaler, s conversion.Scope) error { + out.Enabled = in.Enabled + return nil +} + +// Convert_v1beta1_SeedSettingVerticalPodAutoscaler_To_core_SeedSettingVerticalPodAutoscaler is an autogenerated conversion function. 
+func Convert_v1beta1_SeedSettingVerticalPodAutoscaler_To_core_SeedSettingVerticalPodAutoscaler(in *SeedSettingVerticalPodAutoscaler, out *core.SeedSettingVerticalPodAutoscaler, s conversion.Scope) error { + return autoConvert_v1beta1_SeedSettingVerticalPodAutoscaler_To_core_SeedSettingVerticalPodAutoscaler(in, out, s) +} + +func autoConvert_core_SeedSettingVerticalPodAutoscaler_To_v1beta1_SeedSettingVerticalPodAutoscaler(in *core.SeedSettingVerticalPodAutoscaler, out *SeedSettingVerticalPodAutoscaler, s conversion.Scope) error { + out.Enabled = in.Enabled + return nil +} + +// Convert_core_SeedSettingVerticalPodAutoscaler_To_v1beta1_SeedSettingVerticalPodAutoscaler is an autogenerated conversion function. +func Convert_core_SeedSettingVerticalPodAutoscaler_To_v1beta1_SeedSettingVerticalPodAutoscaler(in *core.SeedSettingVerticalPodAutoscaler, out *SeedSettingVerticalPodAutoscaler, s conversion.Scope) error { + return autoConvert_core_SeedSettingVerticalPodAutoscaler_To_v1beta1_SeedSettingVerticalPodAutoscaler(in, out, s) +} + +func autoConvert_v1beta1_SeedSettings_To_core_SeedSettings(in *SeedSettings, out *core.SeedSettings, s conversion.Scope) error { + out.ExcessCapacityReservation = (*core.SeedSettingExcessCapacityReservation)(unsafe.Pointer(in.ExcessCapacityReservation)) + out.Scheduling = (*core.SeedSettingScheduling)(unsafe.Pointer(in.Scheduling)) + out.ShootDNS = (*core.SeedSettingShootDNS)(unsafe.Pointer(in.ShootDNS)) + out.LoadBalancerServices = (*core.SeedSettingLoadBalancerServices)(unsafe.Pointer(in.LoadBalancerServices)) + out.VerticalPodAutoscaler = (*core.SeedSettingVerticalPodAutoscaler)(unsafe.Pointer(in.VerticalPodAutoscaler)) + return nil +} + +// Convert_v1beta1_SeedSettings_To_core_SeedSettings is an autogenerated conversion function. +func Convert_v1beta1_SeedSettings_To_core_SeedSettings(in *SeedSettings, out *core.SeedSettings, s conversion.Scope) error { + return autoConvert_v1beta1_SeedSettings_To_core_SeedSettings(in, out, s) +} + +func autoConvert_core_SeedSettings_To_v1beta1_SeedSettings(in *core.SeedSettings, out *SeedSettings, s conversion.Scope) error { + out.ExcessCapacityReservation = (*SeedSettingExcessCapacityReservation)(unsafe.Pointer(in.ExcessCapacityReservation)) + out.Scheduling = (*SeedSettingScheduling)(unsafe.Pointer(in.Scheduling)) + out.ShootDNS = (*SeedSettingShootDNS)(unsafe.Pointer(in.ShootDNS)) + out.LoadBalancerServices = (*SeedSettingLoadBalancerServices)(unsafe.Pointer(in.LoadBalancerServices)) + out.VerticalPodAutoscaler = (*SeedSettingVerticalPodAutoscaler)(unsafe.Pointer(in.VerticalPodAutoscaler)) + return nil +} + +// Convert_core_SeedSettings_To_v1beta1_SeedSettings is an autogenerated conversion function. 
+func Convert_core_SeedSettings_To_v1beta1_SeedSettings(in *core.SeedSettings, out *SeedSettings, s conversion.Scope) error { + return autoConvert_core_SeedSettings_To_v1beta1_SeedSettings(in, out, s) +} + func autoConvert_v1beta1_SeedSpec_To_core_SeedSpec(in *SeedSpec, out *core.SeedSpec, s conversion.Scope) error { out.Backup = (*core.SeedBackup)(unsafe.Pointer(in.Backup)) if err := Convert_v1beta1_SeedDNS_To_core_SeedDNS(&in.DNS, &out.DNS, s); err != nil { @@ -3609,6 +4267,8 @@ func autoConvert_v1beta1_SeedSpec_To_core_SeedSpec(in *SeedSpec, out *core.SeedS out.SecretRef = (*v1.SecretReference)(unsafe.Pointer(in.SecretRef)) out.Taints = *(*[]core.SeedTaint)(unsafe.Pointer(&in.Taints)) out.Volume = (*core.SeedVolume)(unsafe.Pointer(in.Volume)) + out.Settings = (*core.SeedSettings)(unsafe.Pointer(in.Settings)) + out.Ingress = (*core.Ingress)(unsafe.Pointer(in.Ingress)) return nil } @@ -3629,8 +4289,10 @@ func autoConvert_core_SeedSpec_To_v1beta1_SeedSpec(in *core.SeedSpec, out *SeedS return err } out.SecretRef = (*v1.SecretReference)(unsafe.Pointer(in.SecretRef)) + out.Settings = (*SeedSettings)(unsafe.Pointer(in.Settings)) out.Taints = *(*[]SeedTaint)(unsafe.Pointer(&in.Taints)) out.Volume = (*SeedVolume)(unsafe.Pointer(in.Volume)) + out.Ingress = (*Ingress)(unsafe.Pointer(in.Ingress)) return nil } @@ -3644,6 +4306,9 @@ func autoConvert_v1beta1_SeedStatus_To_core_SeedStatus(in *SeedStatus, out *core out.KubernetesVersion = (*string)(unsafe.Pointer(in.KubernetesVersion)) out.Conditions = *(*[]core.Condition)(unsafe.Pointer(&in.Conditions)) out.ObservedGeneration = in.ObservedGeneration + out.ClusterIdentity = (*string)(unsafe.Pointer(in.ClusterIdentity)) + out.Capacity = *(*v1.ResourceList)(unsafe.Pointer(&in.Capacity)) + out.Allocatable = *(*v1.ResourceList)(unsafe.Pointer(&in.Allocatable)) return nil } @@ -3657,6 +4322,9 @@ func autoConvert_core_SeedStatus_To_v1beta1_SeedStatus(in *core.SeedStatus, out out.KubernetesVersion = (*string)(unsafe.Pointer(in.KubernetesVersion)) out.Conditions = *(*[]Condition)(unsafe.Pointer(&in.Conditions)) out.ObservedGeneration = in.ObservedGeneration + out.ClusterIdentity = (*string)(unsafe.Pointer(in.ClusterIdentity)) + out.Capacity = *(*v1.ResourceList)(unsafe.Pointer(&in.Capacity)) + out.Allocatable = *(*v1.ResourceList)(unsafe.Pointer(&in.Allocatable)) return nil } @@ -3687,6 +4355,32 @@ func Convert_core_SeedTaint_To_v1beta1_SeedTaint(in *core.SeedTaint, out *SeedTa return autoConvert_core_SeedTaint_To_v1beta1_SeedTaint(in, out, s) } +func autoConvert_v1beta1_SeedTemplate_To_core_SeedTemplate(in *SeedTemplate, out *core.SeedTemplate, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1beta1_SeedSpec_To_core_SeedSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta1_SeedTemplate_To_core_SeedTemplate is an autogenerated conversion function. +func Convert_v1beta1_SeedTemplate_To_core_SeedTemplate(in *SeedTemplate, out *core.SeedTemplate, s conversion.Scope) error { + return autoConvert_v1beta1_SeedTemplate_To_core_SeedTemplate(in, out, s) +} + +func autoConvert_core_SeedTemplate_To_v1beta1_SeedTemplate(in *core.SeedTemplate, out *SeedTemplate, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_SeedSpec_To_v1beta1_SeedSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +// Convert_core_SeedTemplate_To_v1beta1_SeedTemplate is an autogenerated conversion function. 
+func Convert_core_SeedTemplate_To_v1beta1_SeedTemplate(in *core.SeedTemplate, out *SeedTemplate, s conversion.Scope) error { + return autoConvert_core_SeedTemplate_To_v1beta1_SeedTemplate(in, out, s) +} + func autoConvert_v1beta1_SeedVolume_To_core_SeedVolume(in *SeedVolume, out *core.SeedVolume, s conversion.Scope) error { out.MinimumSize = (*resource.Quantity)(unsafe.Pointer(in.MinimumSize)) out.Providers = *(*[]core.SeedVolumeProvider)(unsafe.Pointer(&in.Providers)) @@ -3829,7 +4523,7 @@ func Convert_core_ShootList_To_v1beta1_ShootList(in *core.ShootList, out *ShootL func autoConvert_v1beta1_ShootMachineImage_To_core_ShootMachineImage(in *ShootMachineImage, out *core.ShootMachineImage, s conversion.Scope) error { out.Name = in.Name - out.ProviderConfig = (*core.ProviderConfig)(unsafe.Pointer(in.ProviderConfig)) + out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig)) if err := metav1.Convert_Pointer_string_To_string(&in.Version, &out.Version, s); err != nil { return err } @@ -3843,7 +4537,7 @@ func Convert_v1beta1_ShootMachineImage_To_core_ShootMachineImage(in *ShootMachin func autoConvert_core_ShootMachineImage_To_v1beta1_ShootMachineImage(in *core.ShootMachineImage, out *ShootMachineImage, s conversion.Scope) error { out.Name = in.Name - out.ProviderConfig = (*ProviderConfig)(unsafe.Pointer(in.ProviderConfig)) + out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig)) if err := metav1.Convert_string_To_Pointer_string(&in.Version, &out.Version, s); err != nil { return err } @@ -3898,6 +4592,9 @@ func autoConvert_v1beta1_ShootSpec_To_core_ShootSpec(in *ShootSpec, out *core.Sh out.Region = in.Region out.SecretBindingName = in.SecretBindingName out.SeedName = (*string)(unsafe.Pointer(in.SeedName)) + out.SeedSelector = (*core.SeedSelector)(unsafe.Pointer(in.SeedSelector)) + out.Resources = *(*[]core.NamedResourceReference)(unsafe.Pointer(&in.Resources)) + out.Tolerations = *(*[]core.Toleration)(unsafe.Pointer(&in.Tolerations)) return nil } @@ -3927,6 +4624,9 @@ func autoConvert_core_ShootSpec_To_v1beta1_ShootSpec(in *core.ShootSpec, out *Sh out.Region = in.Region out.SecretBindingName = in.SecretBindingName out.SeedName = (*string)(unsafe.Pointer(in.SeedName)) + out.SeedSelector = (*SeedSelector)(unsafe.Pointer(in.SeedSelector)) + out.Resources = *(*[]NamedResourceReference)(unsafe.Pointer(&in.Resources)) + out.Tolerations = *(*[]Toleration)(unsafe.Pointer(&in.Tolerations)) return nil } @@ -3949,6 +4649,7 @@ func autoConvert_v1beta1_ShootStatus_To_core_ShootStatus(in *ShootStatus, out *c out.SeedName = (*string)(unsafe.Pointer(in.SeedName)) out.TechnicalID = in.TechnicalID out.UID = types.UID(in.UID) + out.ClusterIdentity = (*string)(unsafe.Pointer(in.ClusterIdentity)) return nil } @@ -3971,6 +4672,7 @@ func autoConvert_core_ShootStatus_To_v1beta1_ShootStatus(in *core.ShootStatus, o out.SeedName = (*string)(unsafe.Pointer(in.SeedName)) out.TechnicalID = in.TechnicalID out.UID = types.UID(in.UID) + out.ClusterIdentity = (*string)(unsafe.Pointer(in.ClusterIdentity)) return nil } @@ -3979,6 +4681,62 @@ func Convert_core_ShootStatus_To_v1beta1_ShootStatus(in *core.ShootStatus, out * return autoConvert_core_ShootStatus_To_v1beta1_ShootStatus(in, out, s) } +func autoConvert_v1beta1_Toleration_To_core_Toleration(in *Toleration, out *core.Toleration, s conversion.Scope) error { + out.Key = in.Key + out.Value = (*string)(unsafe.Pointer(in.Value)) + return nil +} + +// Convert_v1beta1_Toleration_To_core_Toleration is an autogenerated conversion 
function. +func Convert_v1beta1_Toleration_To_core_Toleration(in *Toleration, out *core.Toleration, s conversion.Scope) error { + return autoConvert_v1beta1_Toleration_To_core_Toleration(in, out, s) +} + +func autoConvert_core_Toleration_To_v1beta1_Toleration(in *core.Toleration, out *Toleration, s conversion.Scope) error { + out.Key = in.Key + out.Value = (*string)(unsafe.Pointer(in.Value)) + return nil +} + +// Convert_core_Toleration_To_v1beta1_Toleration is an autogenerated conversion function. +func Convert_core_Toleration_To_v1beta1_Toleration(in *core.Toleration, out *Toleration, s conversion.Scope) error { + return autoConvert_core_Toleration_To_v1beta1_Toleration(in, out, s) +} + +func autoConvert_v1beta1_VerticalPodAutoscaler_To_core_VerticalPodAutoscaler(in *VerticalPodAutoscaler, out *core.VerticalPodAutoscaler, s conversion.Scope) error { + out.Enabled = in.Enabled + out.EvictAfterOOMThreshold = (*metav1.Duration)(unsafe.Pointer(in.EvictAfterOOMThreshold)) + out.EvictionRateBurst = (*int32)(unsafe.Pointer(in.EvictionRateBurst)) + out.EvictionRateLimit = (*float64)(unsafe.Pointer(in.EvictionRateLimit)) + out.EvictionTolerance = (*float64)(unsafe.Pointer(in.EvictionTolerance)) + out.RecommendationMarginFraction = (*float64)(unsafe.Pointer(in.RecommendationMarginFraction)) + out.UpdaterInterval = (*metav1.Duration)(unsafe.Pointer(in.UpdaterInterval)) + out.RecommenderInterval = (*metav1.Duration)(unsafe.Pointer(in.RecommenderInterval)) + return nil +} + +// Convert_v1beta1_VerticalPodAutoscaler_To_core_VerticalPodAutoscaler is an autogenerated conversion function. +func Convert_v1beta1_VerticalPodAutoscaler_To_core_VerticalPodAutoscaler(in *VerticalPodAutoscaler, out *core.VerticalPodAutoscaler, s conversion.Scope) error { + return autoConvert_v1beta1_VerticalPodAutoscaler_To_core_VerticalPodAutoscaler(in, out, s) +} + +func autoConvert_core_VerticalPodAutoscaler_To_v1beta1_VerticalPodAutoscaler(in *core.VerticalPodAutoscaler, out *VerticalPodAutoscaler, s conversion.Scope) error { + out.Enabled = in.Enabled + out.EvictAfterOOMThreshold = (*metav1.Duration)(unsafe.Pointer(in.EvictAfterOOMThreshold)) + out.EvictionRateBurst = (*int32)(unsafe.Pointer(in.EvictionRateBurst)) + out.EvictionRateLimit = (*float64)(unsafe.Pointer(in.EvictionRateLimit)) + out.EvictionTolerance = (*float64)(unsafe.Pointer(in.EvictionTolerance)) + out.RecommendationMarginFraction = (*float64)(unsafe.Pointer(in.RecommendationMarginFraction)) + out.UpdaterInterval = (*metav1.Duration)(unsafe.Pointer(in.UpdaterInterval)) + out.RecommenderInterval = (*metav1.Duration)(unsafe.Pointer(in.RecommenderInterval)) + return nil +} + +// Convert_core_VerticalPodAutoscaler_To_v1beta1_VerticalPodAutoscaler is an autogenerated conversion function. 
+func Convert_core_VerticalPodAutoscaler_To_v1beta1_VerticalPodAutoscaler(in *core.VerticalPodAutoscaler, out *VerticalPodAutoscaler, s conversion.Scope) error { + return autoConvert_core_VerticalPodAutoscaler_To_v1beta1_VerticalPodAutoscaler(in, out, s) +} + func autoConvert_v1beta1_Volume_To_core_Volume(in *Volume, out *core.Volume, s conversion.Scope) error { out.Name = (*string)(unsafe.Pointer(in.Name)) out.Type = (*string)(unsafe.Pointer(in.Type)) @@ -4029,6 +4787,28 @@ func Convert_core_VolumeType_To_v1beta1_VolumeType(in *core.VolumeType, out *Vol return autoConvert_core_VolumeType_To_v1beta1_VolumeType(in, out, s) } +func autoConvert_v1beta1_WatchCacheSizes_To_core_WatchCacheSizes(in *WatchCacheSizes, out *core.WatchCacheSizes, s conversion.Scope) error { + out.Default = (*int32)(unsafe.Pointer(in.Default)) + out.Resources = *(*[]core.ResourceWatchCacheSize)(unsafe.Pointer(&in.Resources)) + return nil +} + +// Convert_v1beta1_WatchCacheSizes_To_core_WatchCacheSizes is an autogenerated conversion function. +func Convert_v1beta1_WatchCacheSizes_To_core_WatchCacheSizes(in *WatchCacheSizes, out *core.WatchCacheSizes, s conversion.Scope) error { + return autoConvert_v1beta1_WatchCacheSizes_To_core_WatchCacheSizes(in, out, s) +} + +func autoConvert_core_WatchCacheSizes_To_v1beta1_WatchCacheSizes(in *core.WatchCacheSizes, out *WatchCacheSizes, s conversion.Scope) error { + out.Default = (*int32)(unsafe.Pointer(in.Default)) + out.Resources = *(*[]ResourceWatchCacheSize)(unsafe.Pointer(&in.Resources)) + return nil +} + +// Convert_core_WatchCacheSizes_To_v1beta1_WatchCacheSizes is an autogenerated conversion function. +func Convert_core_WatchCacheSizes_To_v1beta1_WatchCacheSizes(in *core.WatchCacheSizes, out *WatchCacheSizes, s conversion.Scope) error { + return autoConvert_core_WatchCacheSizes_To_v1beta1_WatchCacheSizes(in, out, s) +} + func autoConvert_v1beta1_Worker_To_core_Worker(in *Worker, out *core.Worker, s conversion.Scope) error { out.Annotations = *(*map[string]string)(unsafe.Pointer(&in.Annotations)) out.CABundle = (*string)(unsafe.Pointer(in.CABundle)) @@ -4043,12 +4823,14 @@ func autoConvert_v1beta1_Worker_To_core_Worker(in *Worker, out *core.Worker, s c out.Minimum = in.Minimum out.MaxSurge = (*intstr.IntOrString)(unsafe.Pointer(in.MaxSurge)) out.MaxUnavailable = (*intstr.IntOrString)(unsafe.Pointer(in.MaxUnavailable)) - out.ProviderConfig = (*core.ProviderConfig)(unsafe.Pointer(in.ProviderConfig)) + out.ProviderConfig = (*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig)) out.Taints = *(*[]v1.Taint)(unsafe.Pointer(&in.Taints)) out.Volume = (*core.Volume)(unsafe.Pointer(in.Volume)) - out.DataVolumes = *(*[]core.Volume)(unsafe.Pointer(&in.DataVolumes)) + out.DataVolumes = *(*[]core.DataVolume)(unsafe.Pointer(&in.DataVolumes)) out.KubeletDataVolumeName = (*string)(unsafe.Pointer(in.KubeletDataVolumeName)) out.Zones = *(*[]string)(unsafe.Pointer(&in.Zones)) + out.SystemComponents = (*core.WorkerSystemComponents)(unsafe.Pointer(in.SystemComponents)) + out.MachineControllerManagerSettings = (*core.MachineControllerManagerSettings)(unsafe.Pointer(in.MachineControllerManagerSettings)) return nil } @@ -4071,12 +4853,14 @@ func autoConvert_core_Worker_To_v1beta1_Worker(in *core.Worker, out *Worker, s c out.Minimum = in.Minimum out.MaxSurge = (*intstr.IntOrString)(unsafe.Pointer(in.MaxSurge)) out.MaxUnavailable = (*intstr.IntOrString)(unsafe.Pointer(in.MaxUnavailable)) - out.ProviderConfig = (*ProviderConfig)(unsafe.Pointer(in.ProviderConfig)) + out.ProviderConfig = 
(*runtime.RawExtension)(unsafe.Pointer(in.ProviderConfig)) + out.SystemComponents = (*WorkerSystemComponents)(unsafe.Pointer(in.SystemComponents)) out.Taints = *(*[]v1.Taint)(unsafe.Pointer(&in.Taints)) out.Volume = (*Volume)(unsafe.Pointer(in.Volume)) - out.DataVolumes = *(*[]Volume)(unsafe.Pointer(&in.DataVolumes)) + out.DataVolumes = *(*[]DataVolume)(unsafe.Pointer(&in.DataVolumes)) out.KubeletDataVolumeName = (*string)(unsafe.Pointer(in.KubeletDataVolumeName)) out.Zones = *(*[]string)(unsafe.Pointer(&in.Zones)) + out.MachineControllerManagerSettings = (*MachineControllerManagerSettings)(unsafe.Pointer(in.MachineControllerManagerSettings)) return nil } @@ -4104,3 +4888,23 @@ func autoConvert_core_WorkerKubernetes_To_v1beta1_WorkerKubernetes(in *core.Work func Convert_core_WorkerKubernetes_To_v1beta1_WorkerKubernetes(in *core.WorkerKubernetes, out *WorkerKubernetes, s conversion.Scope) error { return autoConvert_core_WorkerKubernetes_To_v1beta1_WorkerKubernetes(in, out, s) } + +func autoConvert_v1beta1_WorkerSystemComponents_To_core_WorkerSystemComponents(in *WorkerSystemComponents, out *core.WorkerSystemComponents, s conversion.Scope) error { + out.Allow = in.Allow + return nil +} + +// Convert_v1beta1_WorkerSystemComponents_To_core_WorkerSystemComponents is an autogenerated conversion function. +func Convert_v1beta1_WorkerSystemComponents_To_core_WorkerSystemComponents(in *WorkerSystemComponents, out *core.WorkerSystemComponents, s conversion.Scope) error { + return autoConvert_v1beta1_WorkerSystemComponents_To_core_WorkerSystemComponents(in, out, s) +} + +func autoConvert_core_WorkerSystemComponents_To_v1beta1_WorkerSystemComponents(in *core.WorkerSystemComponents, out *WorkerSystemComponents, s conversion.Scope) error { + out.Allow = in.Allow + return nil +} + +// Convert_core_WorkerSystemComponents_To_v1beta1_WorkerSystemComponents is an autogenerated conversion function. +func Convert_core_WorkerSystemComponents_To_v1beta1_WorkerSystemComponents(in *core.WorkerSystemComponents, out *WorkerSystemComponents, s conversion.Scope) error { + return autoConvert_core_WorkerSystemComponents_To_v1beta1_WorkerSystemComponents(in, out, s) +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/zz_generated.deepcopy.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/zz_generated.deepcopy.go index aa32a7a08..1a2f124ab 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/zz_generated.deepcopy.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -75,7 +75,7 @@ func (in *AdmissionPlugin) DeepCopyInto(out *AdmissionPlugin) { *out = *in if in.Config != nil { in, out := &in.Config, &out.Config - *out = new(ProviderConfig) + *out = new(runtime.RawExtension) (*in).DeepCopyInto(*out) } return @@ -263,7 +263,7 @@ func (in *BackupBucketSpec) DeepCopyInto(out *BackupBucketSpec) { out.Provider = in.Provider if in.ProviderConfig != nil { in, out := &in.ProviderConfig, &out.ProviderConfig - *out = new(ProviderConfig) + *out = new(runtime.RawExtension) (*in).DeepCopyInto(*out) } out.SecretRef = in.SecretRef @@ -290,7 +290,7 @@ func (in *BackupBucketStatus) DeepCopyInto(out *BackupBucketStatus) { *out = *in if in.ProviderStatus != nil { in, out := &in.ProviderStatus, &out.ProviderStatus - *out = new(ProviderConfig) + *out = new(runtime.RawExtension) (*in).DeepCopyInto(*out) } if in.LastOperation != nil { @@ -553,7 +553,7 @@ func (in *CloudProfileSpec) DeepCopyInto(out *CloudProfileSpec) { } if in.ProviderConfig != nil { in, out := &in.ProviderConfig, &out.ProviderConfig - *out = new(ProviderConfig) + *out = new(runtime.RawExtension) (*in).DeepCopyInto(*out) } if in.Regions != nil { @@ -565,7 +565,7 @@ func (in *CloudProfileSpec) DeepCopyInto(out *CloudProfileSpec) { } if in.SeedSelector != nil { in, out := &in.SeedSelector, &out.SeedSelector - *out = new(metav1.LabelSelector) + *out = new(SeedSelector) (*in).DeepCopyInto(*out) } if in.VolumeTypes != nil { @@ -657,6 +657,11 @@ func (in *Condition) DeepCopyInto(out *Condition) { *out = *in in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime) + if in.Codes != nil { + in, out := &in.Codes, &out.Codes + *out = make([]ErrorCode, len(*in)) + copy(*out, *in) + } return } @@ -675,7 +680,7 @@ func (in *ContainerRuntime) DeepCopyInto(out *ContainerRuntime) { *out = *in if in.ProviderConfig != nil { in, out := &in.ProviderConfig, &out.ProviderConfig - *out = new(ProviderConfig) + *out = new(runtime.RawExtension) (*in).DeepCopyInto(*out) } return @@ -696,7 +701,17 @@ func (in *ControllerDeployment) DeepCopyInto(out *ControllerDeployment) { *out = *in if in.ProviderConfig != nil { in, out := &in.ProviderConfig, &out.ProviderConfig - *out = new(ProviderConfig) + *out = new(runtime.RawExtension) + (*in).DeepCopyInto(*out) + } + if in.Policy != nil { + in, out := &in.Policy, &out.Policy + *out = new(ControllerDeploymentPolicy) + **out = **in + } + if in.SeedSelector != nil { + in, out := &in.SeedSelector, &out.SeedSelector + *out = new(metav1.LabelSelector) (*in).DeepCopyInto(*out) } return @@ -803,7 +818,7 @@ func (in *ControllerInstallationStatus) DeepCopyInto(out *ControllerInstallation } if in.ProviderStatus != nil { in, out := &in.ProviderStatus, &out.ProviderStatus - *out = new(ProviderConfig) + *out = new(runtime.RawExtension) (*in).DeepCopyInto(*out) } return @@ -920,6 +935,11 @@ func (in *ControllerResource) DeepCopyInto(out *ControllerResource) { *out = new(metav1.Duration) **out = **in } + if in.Primary != nil { + in, out := &in.Primary, &out.Primary + *out = new(bool) + **out = **in + } return } @@ -1028,6 +1048,32 @@ func (in *DNSProvider) DeepCopy() *DNSProvider { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataVolume) DeepCopyInto(out *DataVolume) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataVolume. +func (in *DataVolume) DeepCopy() *DataVolume { + if in == nil { + return nil + } + out := new(DataVolume) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Endpoint) DeepCopyInto(out *Endpoint) { *out = *in @@ -1074,9 +1120,14 @@ func (in *Extension) DeepCopyInto(out *Extension) { *out = *in if in.ProviderConfig != nil { in, out := &in.ProviderConfig, &out.ProviderConfig - *out = new(ProviderConfig) + *out = new(runtime.RawExtension) (*in).DeepCopyInto(*out) } + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = new(bool) + **out = **in + } return } @@ -1216,6 +1267,44 @@ func (in *HorizontalPodAutoscalerConfig) DeepCopy() *HorizontalPodAutoscalerConf return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Ingress) DeepCopyInto(out *Ingress) { + *out = *in + in.Controller.DeepCopyInto(&out.Controller) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Ingress. +func (in *Ingress) DeepCopy() *Ingress { + if in == nil { + return nil + } + out := new(Ingress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressController) DeepCopyInto(out *IngressController) { + *out = *in + if in.ProviderConfig != nil { + in, out := &in.ProviderConfig, &out.ProviderConfig + *out = new(runtime.RawExtension) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressController. +func (in *IngressController) DeepCopy() *IngressController { + if in == nil { + return nil + } + out := new(IngressController) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *KubeAPIServerConfig) DeepCopyInto(out *KubeAPIServerConfig) { *out = *in @@ -1259,6 +1348,16 @@ func (in *KubeAPIServerConfig) DeepCopyInto(out *KubeAPIServerConfig) { *out = new(ServiceAccountConfig) (*in).DeepCopyInto(*out) } + if in.WatchCacheSizes != nil { + in, out := &in.WatchCacheSizes, &out.WatchCacheSizes + *out = new(WatchCacheSizes) + (*in).DeepCopyInto(*out) + } + if in.Requests != nil { + in, out := &in.Requests, &out.Requests + *out = new(KubeAPIServerRequests) + (*in).DeepCopyInto(*out) + } return } @@ -1272,6 +1371,32 @@ func (in *KubeAPIServerConfig) DeepCopy() *KubeAPIServerConfig { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KubeAPIServerRequests) DeepCopyInto(out *KubeAPIServerRequests) { + *out = *in + if in.MaxNonMutatingInflight != nil { + in, out := &in.MaxNonMutatingInflight, &out.MaxNonMutatingInflight + *out = new(int32) + **out = **in + } + if in.MaxMutatingInflight != nil { + in, out := &in.MaxMutatingInflight, &out.MaxMutatingInflight + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeAPIServerRequests. +func (in *KubeAPIServerRequests) DeepCopy() *KubeAPIServerRequests { + if in == nil { + return nil + } + out := new(KubeAPIServerRequests) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *KubeControllerManagerConfig) DeepCopyInto(out *KubeControllerManagerConfig) { *out = *in @@ -1286,6 +1411,11 @@ func (in *KubeControllerManagerConfig) DeepCopyInto(out *KubeControllerManagerCo *out = new(int32) **out = **in } + if in.PodEvictionTimeout != nil { + in, out := &in.PodEvictionTimeout, &out.PodEvictionTimeout + *out = new(metav1.Duration) + **out = **in + } return } @@ -1325,6 +1455,11 @@ func (in *KubeProxyConfig) DeepCopy() *KubeProxyConfig { func (in *KubeSchedulerConfig) DeepCopyInto(out *KubeSchedulerConfig) { *out = *in in.KubernetesConfig.DeepCopyInto(&out.KubernetesConfig) + if in.KubeMaxPDVols != nil { + in, out := &in.KubeMaxPDVols, &out.KubeMaxPDVols + *out = new(string) + **out = **in + } return } @@ -1397,6 +1532,21 @@ func (in *KubeletConfig) DeepCopyInto(out *KubeletConfig) { *out = new(metav1.Duration) **out = **in } + if in.FailSwapOn != nil { + in, out := &in.FailSwapOn, &out.FailSwapOn + *out = new(bool) + **out = **in + } + if in.KubeReserved != nil { + in, out := &in.KubeReserved, &out.KubeReserved + *out = new(KubeletConfigReserved) + (*in).DeepCopyInto(*out) + } + if in.SystemReserved != nil { + in, out := &in.SystemReserved, &out.SystemReserved + *out = new(KubeletConfigReserved) + (*in).DeepCopyInto(*out) + } return } @@ -1533,6 +1683,42 @@ func (in *KubeletConfigEvictionSoftGracePeriod) DeepCopy() *KubeletConfigEvictio return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeletConfigReserved) DeepCopyInto(out *KubeletConfigReserved) { + *out = *in + if in.CPU != nil { + in, out := &in.CPU, &out.CPU + x := (*in).DeepCopy() + *out = &x + } + if in.Memory != nil { + in, out := &in.Memory, &out.Memory + x := (*in).DeepCopy() + *out = &x + } + if in.EphemeralStorage != nil { + in, out := &in.EphemeralStorage, &out.EphemeralStorage + x := (*in).DeepCopy() + *out = &x + } + if in.PID != nil { + in, out := &in.PID, &out.PID + x := (*in).DeepCopy() + *out = &x + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletConfigReserved. +func (in *KubeletConfigReserved) DeepCopy() *KubeletConfigReserved { + if in == nil { + return nil + } + out := new(KubeletConfigReserved) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Kubernetes) DeepCopyInto(out *Kubernetes) { *out = *in @@ -1571,6 +1757,11 @@ func (in *Kubernetes) DeepCopyInto(out *Kubernetes) { *out = new(KubeletConfig) (*in).DeepCopyInto(*out) } + if in.VerticalPodAutoscaler != nil { + in, out := &in.VerticalPodAutoscaler, &out.VerticalPodAutoscaler + *out = new(VerticalPodAutoscaler) + (*in).DeepCopyInto(*out) + } return } @@ -1736,12 +1927,53 @@ func (in *Machine) DeepCopy() *Machine { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineControllerManagerSettings) DeepCopyInto(out *MachineControllerManagerSettings) { + *out = *in + if in.MachineDrainTimeout != nil { + in, out := &in.MachineDrainTimeout, &out.MachineDrainTimeout + *out = new(metav1.Duration) + **out = **in + } + if in.MachineHealthTimeout != nil { + in, out := &in.MachineHealthTimeout, &out.MachineHealthTimeout + *out = new(metav1.Duration) + **out = **in + } + if in.MachineCreationTimeout != nil { + in, out := &in.MachineCreationTimeout, &out.MachineCreationTimeout + *out = new(metav1.Duration) + **out = **in + } + if in.MaxEvictRetries != nil { + in, out := &in.MaxEvictRetries, &out.MaxEvictRetries + *out = new(int32) + **out = **in + } + if in.NodeConditions != nil { + in, out := &in.NodeConditions, &out.NodeConditions + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineControllerManagerSettings. +func (in *MachineControllerManagerSettings) DeepCopy() *MachineControllerManagerSettings { + if in == nil { + return nil + } + out := new(MachineControllerManagerSettings) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MachineImage) DeepCopyInto(out *MachineImage) { *out = *in if in.Versions != nil { in, out := &in.Versions, &out.Versions - *out = make([]ExpirableVersion, len(*in)) + *out = make([]MachineImageVersion, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -1759,6 +1991,30 @@ func (in *MachineImage) DeepCopy() *MachineImage { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineImageVersion) DeepCopyInto(out *MachineImageVersion) { + *out = *in + in.ExpirableVersion.DeepCopyInto(&out.ExpirableVersion) + if in.CRI != nil { + in, out := &in.CRI, &out.CRI + *out = make([]CRI, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineImageVersion. +func (in *MachineImageVersion) DeepCopy() *MachineImageVersion { + if in == nil { + return nil + } + out := new(MachineImageVersion) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *MachineType) DeepCopyInto(out *MachineType) { *out = *in @@ -1818,6 +2074,11 @@ func (in *Maintenance) DeepCopyInto(out *Maintenance) { *out = new(MaintenanceTimeWindow) **out = **in } + if in.ConfineSpecUpdateRollout != nil { + in, out := &in.ConfineSpecUpdateRollout, &out.ConfineSpecUpdateRollout + *out = new(bool) + **out = **in + } return } @@ -1884,12 +2145,29 @@ func (in *Monitoring) DeepCopy() *Monitoring { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamedResourceReference) DeepCopyInto(out *NamedResourceReference) { + *out = *in + out.ResourceRef = in.ResourceRef + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedResourceReference. +func (in *NamedResourceReference) DeepCopy() *NamedResourceReference { + if in == nil { + return nil + } + out := new(NamedResourceReference) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Networking) DeepCopyInto(out *Networking) { *out = *in if in.ProviderConfig != nil { in, out := &in.ProviderConfig, &out.ProviderConfig - *out = new(ProviderConfig) + *out = new(runtime.RawExtension) (*in).DeepCopyInto(*out) } if in.Pods != nil { @@ -2172,7 +2450,7 @@ func (in *Project) DeepCopyInto(out *Project) { out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) - out.Status = in.Status + in.Status.DeepCopyInto(&out.Status) return } @@ -2284,6 +2562,11 @@ func (in *ProjectSpec) DeepCopyInto(out *ProjectSpec) { *out = new(string) **out = **in } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = new(ProjectTolerations) + (*in).DeepCopyInto(*out) + } return } @@ -2300,6 +2583,14 @@ func (in *ProjectSpec) DeepCopy() *ProjectSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ProjectStatus) DeepCopyInto(out *ProjectStatus) { *out = *in + if in.StaleSinceTimestamp != nil { + in, out := &in.StaleSinceTimestamp, &out.StaleSinceTimestamp + *out = (*in).DeepCopy() + } + if in.StaleAutoDeleteTimestamp != nil { + in, out := &in.StaleAutoDeleteTimestamp, &out.StaleAutoDeleteTimestamp + *out = (*in).DeepCopy() + } return } @@ -2313,17 +2604,47 @@ func (in *ProjectStatus) DeepCopy() *ProjectStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectTolerations) DeepCopyInto(out *ProjectTolerations) { + *out = *in + if in.Defaults != nil { + in, out := &in.Defaults, &out.Defaults + *out = make([]Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Whitelist != nil { + in, out := &in.Whitelist, &out.Whitelist + *out = make([]Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectTolerations. +func (in *ProjectTolerations) DeepCopy() *ProjectTolerations { + if in == nil { + return nil + } + out := new(ProjectTolerations) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Provider) DeepCopyInto(out *Provider) { *out = *in if in.ControlPlaneConfig != nil { in, out := &in.ControlPlaneConfig, &out.ControlPlaneConfig - *out = new(ProviderConfig) + *out = new(runtime.RawExtension) (*in).DeepCopyInto(*out) } if in.InfrastructureConfig != nil { in, out := &in.InfrastructureConfig, &out.InfrastructureConfig - *out = new(ProviderConfig) + *out = new(runtime.RawExtension) (*in).DeepCopyInto(*out) } if in.Workers != nil { @@ -2346,23 +2667,6 @@ func (in *Provider) DeepCopy() *Provider { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ProviderConfig) DeepCopyInto(out *ProviderConfig) { - *out = *in - in.RawExtension.DeepCopyInto(&out.RawExtension) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderConfig. -func (in *ProviderConfig) DeepCopy() *ProviderConfig { - if in == nil { - return nil - } - out := new(ProviderConfig) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Quota) DeepCopyInto(out *Quota) { *out = *in @@ -2462,6 +2766,13 @@ func (in *Region) DeepCopyInto(out *Region) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } return } @@ -2475,6 +2786,27 @@ func (in *Region) DeepCopy() *Region { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceWatchCacheSize) DeepCopyInto(out *ResourceWatchCacheSize) { + *out = *in + if in.APIGroup != nil { + in, out := &in.APIGroup, &out.APIGroup + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceWatchCacheSize. +func (in *ResourceWatchCacheSize) DeepCopy() *ResourceWatchCacheSize { + if in == nil { + return nil + } + out := new(ResourceWatchCacheSize) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SecretBinding) DeepCopyInto(out *SecretBinding) { *out = *in @@ -2573,7 +2905,7 @@ func (in *SeedBackup) DeepCopyInto(out *SeedBackup) { *out = *in if in.ProviderConfig != nil { in, out := &in.ProviderConfig, &out.ProviderConfig - *out = new(ProviderConfig) + *out = new(runtime.RawExtension) (*in).DeepCopyInto(*out) } if in.Region != nil { @@ -2598,8 +2930,18 @@ func (in *SeedBackup) DeepCopy() *SeedBackup { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SeedDNS) DeepCopyInto(out *SeedDNS) { *out = *in - return -} + if in.IngressDomain != nil { + in, out := &in.IngressDomain, &out.IngressDomain + *out = new(string) + **out = **in + } + if in.Provider != nil { + in, out := &in.Provider, &out.Provider + *out = new(SeedDNSProvider) + (*in).DeepCopyInto(*out) + } + return +} // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedDNS. func (in *SeedDNS) DeepCopy() *SeedDNS { @@ -2611,6 +2953,33 @@ func (in *SeedDNS) DeepCopy() *SeedDNS { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SeedDNSProvider) DeepCopyInto(out *SeedDNSProvider) { + *out = *in + out.SecretRef = in.SecretRef + if in.Domains != nil { + in, out := &in.Domains, &out.Domains + *out = new(DNSIncludeExclude) + (*in).DeepCopyInto(*out) + } + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = new(DNSIncludeExclude) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedDNSProvider. +func (in *SeedDNSProvider) DeepCopy() *SeedDNSProvider { + if in == nil { + return nil + } + out := new(SeedDNSProvider) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SeedList) DeepCopyInto(out *SeedList) { *out = *in @@ -2680,7 +3049,7 @@ func (in *SeedProvider) DeepCopyInto(out *SeedProvider) { *out = *in if in.ProviderConfig != nil { in, out := &in.ProviderConfig, &out.ProviderConfig - *out = new(ProviderConfig) + *out = new(runtime.RawExtension) (*in).DeepCopyInto(*out) } return @@ -2696,6 +3065,160 @@ func (in *SeedProvider) DeepCopy() *SeedProvider { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SeedSelector) DeepCopyInto(out *SeedSelector) { + *out = *in + if in.LabelSelector != nil { + in, out := &in.LabelSelector, &out.LabelSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.ProviderTypes != nil { + in, out := &in.ProviderTypes, &out.ProviderTypes + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedSelector. +func (in *SeedSelector) DeepCopy() *SeedSelector { + if in == nil { + return nil + } + out := new(SeedSelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SeedSettingExcessCapacityReservation) DeepCopyInto(out *SeedSettingExcessCapacityReservation) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedSettingExcessCapacityReservation. +func (in *SeedSettingExcessCapacityReservation) DeepCopy() *SeedSettingExcessCapacityReservation { + if in == nil { + return nil + } + out := new(SeedSettingExcessCapacityReservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SeedSettingLoadBalancerServices) DeepCopyInto(out *SeedSettingLoadBalancerServices) { + *out = *in + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedSettingLoadBalancerServices. +func (in *SeedSettingLoadBalancerServices) DeepCopy() *SeedSettingLoadBalancerServices { + if in == nil { + return nil + } + out := new(SeedSettingLoadBalancerServices) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SeedSettingScheduling) DeepCopyInto(out *SeedSettingScheduling) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedSettingScheduling. +func (in *SeedSettingScheduling) DeepCopy() *SeedSettingScheduling { + if in == nil { + return nil + } + out := new(SeedSettingScheduling) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SeedSettingShootDNS) DeepCopyInto(out *SeedSettingShootDNS) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedSettingShootDNS. +func (in *SeedSettingShootDNS) DeepCopy() *SeedSettingShootDNS { + if in == nil { + return nil + } + out := new(SeedSettingShootDNS) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SeedSettingVerticalPodAutoscaler) DeepCopyInto(out *SeedSettingVerticalPodAutoscaler) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedSettingVerticalPodAutoscaler. +func (in *SeedSettingVerticalPodAutoscaler) DeepCopy() *SeedSettingVerticalPodAutoscaler { + if in == nil { + return nil + } + out := new(SeedSettingVerticalPodAutoscaler) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SeedSettings) DeepCopyInto(out *SeedSettings) { + *out = *in + if in.ExcessCapacityReservation != nil { + in, out := &in.ExcessCapacityReservation, &out.ExcessCapacityReservation + *out = new(SeedSettingExcessCapacityReservation) + **out = **in + } + if in.Scheduling != nil { + in, out := &in.Scheduling, &out.Scheduling + *out = new(SeedSettingScheduling) + **out = **in + } + if in.ShootDNS != nil { + in, out := &in.ShootDNS, &out.ShootDNS + *out = new(SeedSettingShootDNS) + **out = **in + } + if in.LoadBalancerServices != nil { + in, out := &in.LoadBalancerServices, &out.LoadBalancerServices + *out = new(SeedSettingLoadBalancerServices) + (*in).DeepCopyInto(*out) + } + if in.VerticalPodAutoscaler != nil { + in, out := &in.VerticalPodAutoscaler, &out.VerticalPodAutoscaler + *out = new(SeedSettingVerticalPodAutoscaler) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedSettings. +func (in *SeedSettings) DeepCopy() *SeedSettings { + if in == nil { + return nil + } + out := new(SeedSettings) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *SeedSpec) DeepCopyInto(out *SeedSpec) { *out = *in @@ -2704,7 +3227,7 @@ func (in *SeedSpec) DeepCopyInto(out *SeedSpec) { *out = new(SeedBackup) (*in).DeepCopyInto(*out) } - out.DNS = in.DNS + in.DNS.DeepCopyInto(&out.DNS) in.Networks.DeepCopyInto(&out.Networks) in.Provider.DeepCopyInto(&out.Provider) if in.SecretRef != nil { @@ -2724,6 +3247,16 @@ func (in *SeedSpec) DeepCopyInto(out *SeedSpec) { *out = new(SeedVolume) (*in).DeepCopyInto(*out) } + if in.Settings != nil { + in, out := &in.Settings, &out.Settings + *out = new(SeedSettings) + (*in).DeepCopyInto(*out) + } + if in.Ingress != nil { + in, out := &in.Ingress, &out.Ingress + *out = new(Ingress) + (*in).DeepCopyInto(*out) + } return } @@ -2757,6 +3290,25 @@ func (in *SeedStatus) DeepCopyInto(out *SeedStatus) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.ClusterIdentity != nil { + in, out := &in.ClusterIdentity, &out.ClusterIdentity + *out = new(string) + **out = **in + } + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = make(v1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Allocatable != nil { + in, out := &in.Allocatable, &out.Allocatable + *out = make(v1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } return } @@ -2791,6 +3343,24 @@ func (in *SeedTaint) DeepCopy() *SeedTaint { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SeedTemplate) DeepCopyInto(out *SeedTemplate) { + *out = *in + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedTemplate. +func (in *SeedTemplate) DeepCopy() *SeedTemplate { + if in == nil { + return nil + } + out := new(SeedTemplate) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SeedVolume) DeepCopyInto(out *SeedVolume) { *out = *in @@ -2925,7 +3495,7 @@ func (in *ShootMachineImage) DeepCopyInto(out *ShootMachineImage) { *out = *in if in.ProviderConfig != nil { in, out := &in.ProviderConfig, &out.ProviderConfig - *out = new(ProviderConfig) + *out = new(runtime.RawExtension) (*in).DeepCopyInto(*out) } if in.Version != nil { @@ -3020,6 +3590,23 @@ func (in *ShootSpec) DeepCopyInto(out *ShootSpec) { *out = new(string) **out = **in } + if in.SeedSelector != nil { + in, out := &in.SeedSelector, &out.SeedSelector + *out = new(SeedSelector) + (*in).DeepCopyInto(*out) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]NamedResourceReference, len(*in)) + copy(*out, *in) + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -3072,6 +3659,11 @@ func (in *ShootStatus) DeepCopyInto(out *ShootStatus) { *out = new(string) **out = **in } + if in.ClusterIdentity != nil { + in, out := &in.ClusterIdentity, &out.ClusterIdentity + *out = new(string) + **out = **in + } return } @@ -3085,6 +3677,78 @@ func (in *ShootStatus) DeepCopy() *ShootStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Toleration) DeepCopyInto(out *Toleration) { + *out = *in + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Toleration. +func (in *Toleration) DeepCopy() *Toleration { + if in == nil { + return nil + } + out := new(Toleration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VerticalPodAutoscaler) DeepCopyInto(out *VerticalPodAutoscaler) { + *out = *in + if in.EvictAfterOOMThreshold != nil { + in, out := &in.EvictAfterOOMThreshold, &out.EvictAfterOOMThreshold + *out = new(metav1.Duration) + **out = **in + } + if in.EvictionRateBurst != nil { + in, out := &in.EvictionRateBurst, &out.EvictionRateBurst + *out = new(int32) + **out = **in + } + if in.EvictionRateLimit != nil { + in, out := &in.EvictionRateLimit, &out.EvictionRateLimit + *out = new(float64) + **out = **in + } + if in.EvictionTolerance != nil { + in, out := &in.EvictionTolerance, &out.EvictionTolerance + *out = new(float64) + **out = **in + } + if in.RecommendationMarginFraction != nil { + in, out := &in.RecommendationMarginFraction, &out.RecommendationMarginFraction + *out = new(float64) + **out = **in + } + if in.UpdaterInterval != nil { + in, out := &in.UpdaterInterval, &out.UpdaterInterval + *out = new(metav1.Duration) + **out = **in + } + if in.RecommenderInterval != nil { + in, out := &in.RecommenderInterval, &out.RecommenderInterval + *out = new(metav1.Duration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VerticalPodAutoscaler. +func (in *VerticalPodAutoscaler) DeepCopy() *VerticalPodAutoscaler { + if in == nil { + return nil + } + out := new(VerticalPodAutoscaler) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Volume) DeepCopyInto(out *Volume) { *out = *in @@ -3137,6 +3801,34 @@ func (in *VolumeType) DeepCopy() *VolumeType { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WatchCacheSizes) DeepCopyInto(out *WatchCacheSizes) { + *out = *in + if in.Default != nil { + in, out := &in.Default, &out.Default + *out = new(int32) + **out = **in + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]ResourceWatchCacheSize, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WatchCacheSizes. +func (in *WatchCacheSizes) DeepCopy() *WatchCacheSizes { + if in == nil { + return nil + } + out := new(WatchCacheSizes) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Worker) DeepCopyInto(out *Worker) { *out = *in @@ -3182,7 +3874,7 @@ func (in *Worker) DeepCopyInto(out *Worker) { } if in.ProviderConfig != nil { in, out := &in.ProviderConfig, &out.ProviderConfig - *out = new(ProviderConfig) + *out = new(runtime.RawExtension) (*in).DeepCopyInto(*out) } if in.Taints != nil { @@ -3199,7 +3891,7 @@ func (in *Worker) DeepCopyInto(out *Worker) { } if in.DataVolumes != nil { in, out := &in.DataVolumes, &out.DataVolumes - *out = make([]Volume, len(*in)) + *out = make([]DataVolume, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -3214,6 +3906,16 @@ func (in *Worker) DeepCopyInto(out *Worker) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.SystemComponents != nil { + in, out := &in.SystemComponents, &out.SystemComponents + *out = new(WorkerSystemComponents) + **out = **in + } + if in.MachineControllerManagerSettings != nil { + in, out := &in.MachineControllerManagerSettings, &out.MachineControllerManagerSettings + *out = new(MachineControllerManagerSettings) + (*in).DeepCopyInto(*out) + } return } @@ -3247,3 +3949,19 @@ func (in *WorkerKubernetes) DeepCopy() *WorkerKubernetes { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkerSystemComponents) DeepCopyInto(out *WorkerSystemComponents) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerSystemComponents. +func (in *WorkerSystemComponents) DeepCopy() *WorkerSystemComponents { + if in == nil { + return nil + } + out := new(WorkerSystemComponents) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/zz_generated.defaults.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/zz_generated.defaults.go index 214d9743d..1f87ca32e 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/zz_generated.defaults.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/zz_generated.defaults.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -30,10 +30,14 @@ import ( func RegisterDefaults(scheme *runtime.Scheme) error { scheme.AddTypeDefaultingFunc(&CloudProfile{}, func(obj interface{}) { SetObjectDefaults_CloudProfile(obj.(*CloudProfile)) }) scheme.AddTypeDefaultingFunc(&CloudProfileList{}, func(obj interface{}) { SetObjectDefaults_CloudProfileList(obj.(*CloudProfileList)) }) + scheme.AddTypeDefaultingFunc(&ControllerRegistration{}, func(obj interface{}) { SetObjectDefaults_ControllerRegistration(obj.(*ControllerRegistration)) }) + scheme.AddTypeDefaultingFunc(&ControllerRegistrationList{}, func(obj interface{}) { SetObjectDefaults_ControllerRegistrationList(obj.(*ControllerRegistrationList)) }) scheme.AddTypeDefaultingFunc(&Project{}, func(obj interface{}) { SetObjectDefaults_Project(obj.(*Project)) }) scheme.AddTypeDefaultingFunc(&ProjectList{}, func(obj interface{}) { SetObjectDefaults_ProjectList(obj.(*ProjectList)) }) scheme.AddTypeDefaultingFunc(&SecretBinding{}, func(obj interface{}) { SetObjectDefaults_SecretBinding(obj.(*SecretBinding)) }) scheme.AddTypeDefaultingFunc(&SecretBindingList{}, func(obj interface{}) { SetObjectDefaults_SecretBindingList(obj.(*SecretBindingList)) }) + scheme.AddTypeDefaultingFunc(&Seed{}, func(obj interface{}) { SetObjectDefaults_Seed(obj.(*Seed)) }) + scheme.AddTypeDefaultingFunc(&SeedList{}, func(obj interface{}) { SetObjectDefaults_SeedList(obj.(*SeedList)) }) scheme.AddTypeDefaultingFunc(&Shoot{}, func(obj interface{}) { SetObjectDefaults_Shoot(obj.(*Shoot)) }) scheme.AddTypeDefaultingFunc(&ShootList{}, func(obj interface{}) { SetObjectDefaults_ShootList(obj.(*ShootList)) }) return nil @@ -57,6 +61,23 @@ func SetObjectDefaults_CloudProfileList(in *CloudProfileList) { } } +func SetObjectDefaults_ControllerRegistration(in *ControllerRegistration) { + for i := range in.Spec.Resources { + a := &in.Spec.Resources[i] + SetDefaults_ControllerResource(a) + } + if in.Spec.Deployment != nil { + SetDefaults_ControllerDeployment(in.Spec.Deployment) + } +} + +func SetObjectDefaults_ControllerRegistrationList(in *ControllerRegistrationList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_ControllerRegistration(a) + } +} + func SetObjectDefaults_Project(in *Project) { SetDefaults_Project(in) } @@ -79,6 +100,17 @@ func SetObjectDefaults_SecretBindingList(in *SecretBindingList) { } } +func SetObjectDefaults_Seed(in *Seed) { + SetDefaults_Seed(in) +} + +func SetObjectDefaults_SeedList(in *SeedList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_Seed(a) + } +} + func SetObjectDefaults_Shoot(in *Shoot) { SetDefaults_Shoot(in) if in.Spec.Addons != nil { @@ -86,6 +118,9 @@ func SetObjectDefaults_Shoot(in *Shoot) { SetDefaults_NginxIngress(in.Spec.Addons.NginxIngress) } } + if in.Spec.Kubernetes.VerticalPodAutoscaler != nil { + SetDefaults_VerticalPodAutoscaler(in.Spec.Kubernetes.VerticalPodAutoscaler) + } if in.Spec.Maintenance != nil { SetDefaults_Maintenance(in.Spec.Maintenance) } diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/zz_generated.deepcopy.go b/vendor/github.com/gardener/gardener/pkg/apis/core/zz_generated.deepcopy.go index 4343c16e6..a5deacbf9 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/core/zz_generated.deepcopy.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 
2 except as noted otherwise in the LICENSE file +Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -75,7 +75,7 @@ func (in *AdmissionPlugin) DeepCopyInto(out *AdmissionPlugin) { *out = *in if in.Config != nil { in, out := &in.Config, &out.Config - *out = new(ProviderConfig) + *out = new(runtime.RawExtension) (*in).DeepCopyInto(*out) } return @@ -263,7 +263,7 @@ func (in *BackupBucketSpec) DeepCopyInto(out *BackupBucketSpec) { out.Provider = in.Provider if in.ProviderConfig != nil { in, out := &in.ProviderConfig, &out.ProviderConfig - *out = new(ProviderConfig) + *out = new(runtime.RawExtension) (*in).DeepCopyInto(*out) } out.SecretRef = in.SecretRef @@ -290,7 +290,7 @@ func (in *BackupBucketStatus) DeepCopyInto(out *BackupBucketStatus) { *out = *in if in.ProviderStatus != nil { in, out := &in.ProviderStatus, &out.ProviderStatus - *out = new(ProviderConfig) + *out = new(runtime.RawExtension) (*in).DeepCopyInto(*out) } if in.LastOperation != nil { @@ -553,7 +553,7 @@ func (in *CloudProfileSpec) DeepCopyInto(out *CloudProfileSpec) { } if in.ProviderConfig != nil { in, out := &in.ProviderConfig, &out.ProviderConfig - *out = new(ProviderConfig) + *out = new(runtime.RawExtension) (*in).DeepCopyInto(*out) } if in.Regions != nil { @@ -565,7 +565,7 @@ func (in *CloudProfileSpec) DeepCopyInto(out *CloudProfileSpec) { } if in.SeedSelector != nil { in, out := &in.SeedSelector, &out.SeedSelector - *out = new(metav1.LabelSelector) + *out = new(SeedSelector) (*in).DeepCopyInto(*out) } if in.VolumeTypes != nil { @@ -657,6 +657,11 @@ func (in *Condition) DeepCopyInto(out *Condition) { *out = *in in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime) + if in.Codes != nil { + in, out := &in.Codes, &out.Codes + *out = make([]ErrorCode, len(*in)) + copy(*out, *in) + } return } @@ -675,7 +680,7 @@ func (in *ContainerRuntime) DeepCopyInto(out *ContainerRuntime) { *out = *in if in.ProviderConfig != nil { in, out := &in.ProviderConfig, &out.ProviderConfig - *out = new(ProviderConfig) + *out = new(runtime.RawExtension) (*in).DeepCopyInto(*out) } return @@ -696,7 +701,17 @@ func (in *ControllerDeployment) DeepCopyInto(out *ControllerDeployment) { *out = *in if in.ProviderConfig != nil { in, out := &in.ProviderConfig, &out.ProviderConfig - *out = new(ProviderConfig) + *out = new(runtime.RawExtension) + (*in).DeepCopyInto(*out) + } + if in.Policy != nil { + in, out := &in.Policy, &out.Policy + *out = new(ControllerDeploymentPolicy) + **out = **in + } + if in.SeedSelector != nil { + in, out := &in.SeedSelector, &out.SeedSelector + *out = new(metav1.LabelSelector) (*in).DeepCopyInto(*out) } return @@ -803,7 +818,7 @@ func (in *ControllerInstallationStatus) DeepCopyInto(out *ControllerInstallation } if in.ProviderStatus != nil { in, out := &in.ProviderStatus, &out.ProviderStatus - *out = new(ProviderConfig) + *out = new(runtime.RawExtension) (*in).DeepCopyInto(*out) } return @@ -920,6 +935,11 @@ func (in *ControllerResource) DeepCopyInto(out *ControllerResource) { *out = new(metav1.Duration) **out = **in } + if in.Primary != nil { + in, out := &in.Primary, &out.Primary + *out = new(bool) + **out = **in + } return } @@ -1028,6 +1048,32 @@ func (in *DNSProvider) DeepCopy() *DNSProvider { return out } +// 
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataVolume) DeepCopyInto(out *DataVolume) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataVolume. +func (in *DataVolume) DeepCopy() *DataVolume { + if in == nil { + return nil + } + out := new(DataVolume) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Endpoint) DeepCopyInto(out *Endpoint) { *out = *in @@ -1074,9 +1120,14 @@ func (in *Extension) DeepCopyInto(out *Extension) { *out = *in if in.ProviderConfig != nil { in, out := &in.ProviderConfig, &out.ProviderConfig - *out = new(ProviderConfig) + *out = new(runtime.RawExtension) (*in).DeepCopyInto(*out) } + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = new(bool) + **out = **in + } return } @@ -1103,7 +1154,16 @@ func (in *ExtensionResourceState) DeepCopyInto(out *ExtensionResourceState) { *out = new(string) **out = **in } - in.State.DeepCopyInto(&out.State) + if in.State != nil { + in, out := &in.State, &out.State + *out = new(runtime.RawExtension) + (*in).DeepCopyInto(*out) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]NamedResourceReference, len(*in)) + copy(*out, *in) + } return } @@ -1260,6 +1320,44 @@ func (in *HorizontalPodAutoscalerConfig) DeepCopy() *HorizontalPodAutoscalerConf return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Ingress) DeepCopyInto(out *Ingress) { + *out = *in + in.Controller.DeepCopyInto(&out.Controller) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Ingress. +func (in *Ingress) DeepCopy() *Ingress { + if in == nil { + return nil + } + out := new(Ingress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressController) DeepCopyInto(out *IngressController) { + *out = *in + if in.ProviderConfig != nil { + in, out := &in.ProviderConfig, &out.ProviderConfig + *out = new(runtime.RawExtension) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressController. +func (in *IngressController) DeepCopy() *IngressController { + if in == nil { + return nil + } + out := new(IngressController) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *KubeAPIServerConfig) DeepCopyInto(out *KubeAPIServerConfig) { *out = *in @@ -1303,6 +1401,16 @@ func (in *KubeAPIServerConfig) DeepCopyInto(out *KubeAPIServerConfig) { *out = new(ServiceAccountConfig) (*in).DeepCopyInto(*out) } + if in.WatchCacheSizes != nil { + in, out := &in.WatchCacheSizes, &out.WatchCacheSizes + *out = new(WatchCacheSizes) + (*in).DeepCopyInto(*out) + } + if in.Requests != nil { + in, out := &in.Requests, &out.Requests + *out = new(KubeAPIServerRequests) + (*in).DeepCopyInto(*out) + } return } @@ -1316,6 +1424,32 @@ func (in *KubeAPIServerConfig) DeepCopy() *KubeAPIServerConfig { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeAPIServerRequests) DeepCopyInto(out *KubeAPIServerRequests) { + *out = *in + if in.MaxNonMutatingInflight != nil { + in, out := &in.MaxNonMutatingInflight, &out.MaxNonMutatingInflight + *out = new(int32) + **out = **in + } + if in.MaxMutatingInflight != nil { + in, out := &in.MaxMutatingInflight, &out.MaxMutatingInflight + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeAPIServerRequests. +func (in *KubeAPIServerRequests) DeepCopy() *KubeAPIServerRequests { + if in == nil { + return nil + } + out := new(KubeAPIServerRequests) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *KubeControllerManagerConfig) DeepCopyInto(out *KubeControllerManagerConfig) { *out = *in @@ -1330,6 +1464,11 @@ func (in *KubeControllerManagerConfig) DeepCopyInto(out *KubeControllerManagerCo *out = new(int32) **out = **in } + if in.PodEvictionTimeout != nil { + in, out := &in.PodEvictionTimeout, &out.PodEvictionTimeout + *out = new(metav1.Duration) + **out = **in + } return } @@ -1369,6 +1508,11 @@ func (in *KubeProxyConfig) DeepCopy() *KubeProxyConfig { func (in *KubeSchedulerConfig) DeepCopyInto(out *KubeSchedulerConfig) { *out = *in in.KubernetesConfig.DeepCopyInto(&out.KubernetesConfig) + if in.KubeMaxPDVols != nil { + in, out := &in.KubeMaxPDVols, &out.KubeMaxPDVols + *out = new(string) + **out = **in + } return } @@ -1441,6 +1585,21 @@ func (in *KubeletConfig) DeepCopyInto(out *KubeletConfig) { *out = new(metav1.Duration) **out = **in } + if in.FailSwapOn != nil { + in, out := &in.FailSwapOn, &out.FailSwapOn + *out = new(bool) + **out = **in + } + if in.KubeReserved != nil { + in, out := &in.KubeReserved, &out.KubeReserved + *out = new(KubeletConfigReserved) + (*in).DeepCopyInto(*out) + } + if in.SystemReserved != nil { + in, out := &in.SystemReserved, &out.SystemReserved + *out = new(KubeletConfigReserved) + (*in).DeepCopyInto(*out) + } return } @@ -1577,6 +1736,42 @@ func (in *KubeletConfigEvictionSoftGracePeriod) DeepCopy() *KubeletConfigEvictio return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KubeletConfigReserved) DeepCopyInto(out *KubeletConfigReserved) { + *out = *in + if in.CPU != nil { + in, out := &in.CPU, &out.CPU + x := (*in).DeepCopy() + *out = &x + } + if in.Memory != nil { + in, out := &in.Memory, &out.Memory + x := (*in).DeepCopy() + *out = &x + } + if in.EphemeralStorage != nil { + in, out := &in.EphemeralStorage, &out.EphemeralStorage + x := (*in).DeepCopy() + *out = &x + } + if in.PID != nil { + in, out := &in.PID, &out.PID + x := (*in).DeepCopy() + *out = &x + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletConfigReserved. +func (in *KubeletConfigReserved) DeepCopy() *KubeletConfigReserved { + if in == nil { + return nil + } + out := new(KubeletConfigReserved) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Kubernetes) DeepCopyInto(out *Kubernetes) { *out = *in @@ -1615,6 +1810,11 @@ func (in *Kubernetes) DeepCopyInto(out *Kubernetes) { *out = new(KubeletConfig) (*in).DeepCopyInto(*out) } + if in.VerticalPodAutoscaler != nil { + in, out := &in.VerticalPodAutoscaler, &out.VerticalPodAutoscaler + *out = new(VerticalPodAutoscaler) + (*in).DeepCopyInto(*out) + } return } @@ -1780,12 +1980,53 @@ func (in *Machine) DeepCopy() *Machine { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineControllerManagerSettings) DeepCopyInto(out *MachineControllerManagerSettings) { + *out = *in + if in.MachineDrainTimeout != nil { + in, out := &in.MachineDrainTimeout, &out.MachineDrainTimeout + *out = new(metav1.Duration) + **out = **in + } + if in.MachineHealthTimeout != nil { + in, out := &in.MachineHealthTimeout, &out.MachineHealthTimeout + *out = new(metav1.Duration) + **out = **in + } + if in.MachineCreationTimeout != nil { + in, out := &in.MachineCreationTimeout, &out.MachineCreationTimeout + *out = new(metav1.Duration) + **out = **in + } + if in.MaxEvictRetries != nil { + in, out := &in.MaxEvictRetries, &out.MaxEvictRetries + *out = new(int32) + **out = **in + } + if in.NodeConditions != nil { + in, out := &in.NodeConditions, &out.NodeConditions + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineControllerManagerSettings. +func (in *MachineControllerManagerSettings) DeepCopy() *MachineControllerManagerSettings { + if in == nil { + return nil + } + out := new(MachineControllerManagerSettings) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MachineImage) DeepCopyInto(out *MachineImage) { *out = *in if in.Versions != nil { in, out := &in.Versions, &out.Versions - *out = make([]ExpirableVersion, len(*in)) + *out = make([]MachineImageVersion, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -1803,6 +2044,30 @@ func (in *MachineImage) DeepCopy() *MachineImage { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MachineImageVersion) DeepCopyInto(out *MachineImageVersion) { + *out = *in + in.ExpirableVersion.DeepCopyInto(&out.ExpirableVersion) + if in.CRI != nil { + in, out := &in.CRI, &out.CRI + *out = make([]CRI, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineImageVersion. +func (in *MachineImageVersion) DeepCopy() *MachineImageVersion { + if in == nil { + return nil + } + out := new(MachineImageVersion) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MachineType) DeepCopyInto(out *MachineType) { *out = *in @@ -1862,6 +2127,11 @@ func (in *Maintenance) DeepCopyInto(out *Maintenance) { *out = new(MaintenanceTimeWindow) **out = **in } + if in.ConfineSpecUpdateRollout != nil { + in, out := &in.ConfineSpecUpdateRollout, &out.ConfineSpecUpdateRollout + *out = new(bool) + **out = **in + } return } @@ -1928,12 +2198,29 @@ func (in *Monitoring) DeepCopy() *Monitoring { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamedResourceReference) DeepCopyInto(out *NamedResourceReference) { + *out = *in + out.ResourceRef = in.ResourceRef + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedResourceReference. +func (in *NamedResourceReference) DeepCopy() *NamedResourceReference { + if in == nil { + return nil + } + out := new(NamedResourceReference) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Networking) DeepCopyInto(out *Networking) { *out = *in if in.ProviderConfig != nil { in, out := &in.ProviderConfig, &out.ProviderConfig - *out = new(ProviderConfig) + *out = new(runtime.RawExtension) (*in).DeepCopyInto(*out) } if in.Pods != nil { @@ -2216,7 +2503,7 @@ func (in *Project) DeepCopyInto(out *Project) { out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) - out.Status = in.Status + in.Status.DeepCopyInto(&out.Status) return } @@ -2328,6 +2615,11 @@ func (in *ProjectSpec) DeepCopyInto(out *ProjectSpec) { *out = new(string) **out = **in } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = new(ProjectTolerations) + (*in).DeepCopyInto(*out) + } return } @@ -2344,6 +2636,14 @@ func (in *ProjectSpec) DeepCopy() *ProjectSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ProjectStatus) DeepCopyInto(out *ProjectStatus) { *out = *in + if in.StaleSinceTimestamp != nil { + in, out := &in.StaleSinceTimestamp, &out.StaleSinceTimestamp + *out = (*in).DeepCopy() + } + if in.StaleAutoDeleteTimestamp != nil { + in, out := &in.StaleAutoDeleteTimestamp, &out.StaleAutoDeleteTimestamp + *out = (*in).DeepCopy() + } return } @@ -2357,17 +2657,47 @@ func (in *ProjectStatus) DeepCopy() *ProjectStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProjectTolerations) DeepCopyInto(out *ProjectTolerations) { + *out = *in + if in.Defaults != nil { + in, out := &in.Defaults, &out.Defaults + *out = make([]Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Whitelist != nil { + in, out := &in.Whitelist, &out.Whitelist + *out = make([]Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectTolerations. +func (in *ProjectTolerations) DeepCopy() *ProjectTolerations { + if in == nil { + return nil + } + out := new(ProjectTolerations) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Provider) DeepCopyInto(out *Provider) { *out = *in if in.ControlPlaneConfig != nil { in, out := &in.ControlPlaneConfig, &out.ControlPlaneConfig - *out = new(ProviderConfig) + *out = new(runtime.RawExtension) (*in).DeepCopyInto(*out) } if in.InfrastructureConfig != nil { in, out := &in.InfrastructureConfig, &out.InfrastructureConfig - *out = new(ProviderConfig) + *out = new(runtime.RawExtension) (*in).DeepCopyInto(*out) } if in.Workers != nil { @@ -2390,23 +2720,6 @@ func (in *Provider) DeepCopy() *Provider { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ProviderConfig) DeepCopyInto(out *ProviderConfig) { - *out = *in - in.RawExtension.DeepCopyInto(&out.RawExtension) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderConfig. -func (in *ProviderConfig) DeepCopy() *ProviderConfig { - if in == nil { - return nil - } - out := new(ProviderConfig) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Quota) DeepCopyInto(out *Quota) { *out = *in @@ -2506,6 +2819,13 @@ func (in *Region) DeepCopyInto(out *Region) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } return } @@ -2520,20 +2840,59 @@ func (in *Region) DeepCopy() *Region { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SecretBinding) DeepCopyInto(out *SecretBinding) { +func (in *ResourceData) DeepCopyInto(out *ResourceData) { *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.SecretRef = in.SecretRef - if in.Quotas != nil { - in, out := &in.Quotas, &out.Quotas - *out = make([]v1.ObjectReference, len(*in)) - copy(*out, *in) - } + out.CrossVersionObjectReference = in.CrossVersionObjectReference + in.Data.DeepCopyInto(&out.Data) return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretBinding. +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceData. +func (in *ResourceData) DeepCopy() *ResourceData { + if in == nil { + return nil + } + out := new(ResourceData) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceWatchCacheSize) DeepCopyInto(out *ResourceWatchCacheSize) { + *out = *in + if in.APIGroup != nil { + in, out := &in.APIGroup, &out.APIGroup + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceWatchCacheSize. +func (in *ResourceWatchCacheSize) DeepCopy() *ResourceWatchCacheSize { + if in == nil { + return nil + } + out := new(ResourceWatchCacheSize) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretBinding) DeepCopyInto(out *SecretBinding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.SecretRef = in.SecretRef + if in.Quotas != nil { + in, out := &in.Quotas, &out.Quotas + *out = make([]v1.ObjectReference, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretBinding. func (in *SecretBinding) DeepCopy() *SecretBinding { if in == nil { return nil @@ -2617,7 +2976,7 @@ func (in *SeedBackup) DeepCopyInto(out *SeedBackup) { *out = *in if in.ProviderConfig != nil { in, out := &in.ProviderConfig, &out.ProviderConfig - *out = new(ProviderConfig) + *out = new(runtime.RawExtension) (*in).DeepCopyInto(*out) } if in.Region != nil { @@ -2642,6 +3001,16 @@ func (in *SeedBackup) DeepCopy() *SeedBackup { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SeedDNS) DeepCopyInto(out *SeedDNS) { *out = *in + if in.IngressDomain != nil { + in, out := &in.IngressDomain, &out.IngressDomain + *out = new(string) + **out = **in + } + if in.Provider != nil { + in, out := &in.Provider, &out.Provider + *out = new(SeedDNSProvider) + (*in).DeepCopyInto(*out) + } return } @@ -2655,6 +3024,33 @@ func (in *SeedDNS) DeepCopy() *SeedDNS { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SeedDNSProvider) DeepCopyInto(out *SeedDNSProvider) { + *out = *in + out.SecretRef = in.SecretRef + if in.Domains != nil { + in, out := &in.Domains, &out.Domains + *out = new(DNSIncludeExclude) + (*in).DeepCopyInto(*out) + } + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = new(DNSIncludeExclude) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedDNSProvider. +func (in *SeedDNSProvider) DeepCopy() *SeedDNSProvider { + if in == nil { + return nil + } + out := new(SeedDNSProvider) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SeedList) DeepCopyInto(out *SeedList) { *out = *in @@ -2724,7 +3120,7 @@ func (in *SeedProvider) DeepCopyInto(out *SeedProvider) { *out = *in if in.ProviderConfig != nil { in, out := &in.ProviderConfig, &out.ProviderConfig - *out = new(ProviderConfig) + *out = new(runtime.RawExtension) (*in).DeepCopyInto(*out) } return @@ -2740,6 +3136,160 @@ func (in *SeedProvider) DeepCopy() *SeedProvider { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SeedSelector) DeepCopyInto(out *SeedSelector) { + *out = *in + if in.LabelSelector != nil { + in, out := &in.LabelSelector, &out.LabelSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.ProviderTypes != nil { + in, out := &in.ProviderTypes, &out.ProviderTypes + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedSelector. +func (in *SeedSelector) DeepCopy() *SeedSelector { + if in == nil { + return nil + } + out := new(SeedSelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SeedSettingExcessCapacityReservation) DeepCopyInto(out *SeedSettingExcessCapacityReservation) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedSettingExcessCapacityReservation. +func (in *SeedSettingExcessCapacityReservation) DeepCopy() *SeedSettingExcessCapacityReservation { + if in == nil { + return nil + } + out := new(SeedSettingExcessCapacityReservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SeedSettingLoadBalancerServices) DeepCopyInto(out *SeedSettingLoadBalancerServices) { + *out = *in + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedSettingLoadBalancerServices. +func (in *SeedSettingLoadBalancerServices) DeepCopy() *SeedSettingLoadBalancerServices { + if in == nil { + return nil + } + out := new(SeedSettingLoadBalancerServices) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SeedSettingScheduling) DeepCopyInto(out *SeedSettingScheduling) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedSettingScheduling. +func (in *SeedSettingScheduling) DeepCopy() *SeedSettingScheduling { + if in == nil { + return nil + } + out := new(SeedSettingScheduling) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SeedSettingShootDNS) DeepCopyInto(out *SeedSettingShootDNS) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedSettingShootDNS. +func (in *SeedSettingShootDNS) DeepCopy() *SeedSettingShootDNS { + if in == nil { + return nil + } + out := new(SeedSettingShootDNS) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SeedSettingVerticalPodAutoscaler) DeepCopyInto(out *SeedSettingVerticalPodAutoscaler) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedSettingVerticalPodAutoscaler. 
+func (in *SeedSettingVerticalPodAutoscaler) DeepCopy() *SeedSettingVerticalPodAutoscaler { + if in == nil { + return nil + } + out := new(SeedSettingVerticalPodAutoscaler) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SeedSettings) DeepCopyInto(out *SeedSettings) { + *out = *in + if in.ExcessCapacityReservation != nil { + in, out := &in.ExcessCapacityReservation, &out.ExcessCapacityReservation + *out = new(SeedSettingExcessCapacityReservation) + **out = **in + } + if in.Scheduling != nil { + in, out := &in.Scheduling, &out.Scheduling + *out = new(SeedSettingScheduling) + **out = **in + } + if in.ShootDNS != nil { + in, out := &in.ShootDNS, &out.ShootDNS + *out = new(SeedSettingShootDNS) + **out = **in + } + if in.LoadBalancerServices != nil { + in, out := &in.LoadBalancerServices, &out.LoadBalancerServices + *out = new(SeedSettingLoadBalancerServices) + (*in).DeepCopyInto(*out) + } + if in.VerticalPodAutoscaler != nil { + in, out := &in.VerticalPodAutoscaler, &out.VerticalPodAutoscaler + *out = new(SeedSettingVerticalPodAutoscaler) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedSettings. +func (in *SeedSettings) DeepCopy() *SeedSettings { + if in == nil { + return nil + } + out := new(SeedSettings) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SeedSpec) DeepCopyInto(out *SeedSpec) { *out = *in @@ -2748,7 +3298,7 @@ func (in *SeedSpec) DeepCopyInto(out *SeedSpec) { *out = new(SeedBackup) (*in).DeepCopyInto(*out) } - out.DNS = in.DNS + in.DNS.DeepCopyInto(&out.DNS) in.Networks.DeepCopyInto(&out.Networks) in.Provider.DeepCopyInto(&out.Provider) if in.SecretRef != nil { @@ -2756,6 +3306,11 @@ func (in *SeedSpec) DeepCopyInto(out *SeedSpec) { *out = new(v1.SecretReference) **out = **in } + if in.Settings != nil { + in, out := &in.Settings, &out.Settings + *out = new(SeedSettings) + (*in).DeepCopyInto(*out) + } if in.Taints != nil { in, out := &in.Taints, &out.Taints *out = make([]SeedTaint, len(*in)) @@ -2768,6 +3323,11 @@ func (in *SeedSpec) DeepCopyInto(out *SeedSpec) { *out = new(SeedVolume) (*in).DeepCopyInto(*out) } + if in.Ingress != nil { + in, out := &in.Ingress, &out.Ingress + *out = new(Ingress) + (*in).DeepCopyInto(*out) + } return } @@ -2801,6 +3361,25 @@ func (in *SeedStatus) DeepCopyInto(out *SeedStatus) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.ClusterIdentity != nil { + in, out := &in.ClusterIdentity, &out.ClusterIdentity + *out = new(string) + **out = **in + } + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = make(v1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Allocatable != nil { + in, out := &in.Allocatable, &out.Allocatable + *out = make(v1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } return } @@ -2835,6 +3414,24 @@ func (in *SeedTaint) DeepCopy() *SeedTaint { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SeedTemplate) DeepCopyInto(out *SeedTemplate) { + *out = *in + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedTemplate. +func (in *SeedTemplate) DeepCopy() *SeedTemplate { + if in == nil { + return nil + } + out := new(SeedTemplate) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SeedVolume) DeepCopyInto(out *SeedVolume) { *out = *in @@ -2969,7 +3566,7 @@ func (in *ShootMachineImage) DeepCopyInto(out *ShootMachineImage) { *out = *in if in.ProviderConfig != nil { in, out := &in.ProviderConfig, &out.ProviderConfig - *out = new(ProviderConfig) + *out = new(runtime.RawExtension) (*in).DeepCopyInto(*out) } return @@ -3059,6 +3656,23 @@ func (in *ShootSpec) DeepCopyInto(out *ShootSpec) { *out = new(string) **out = **in } + if in.SeedSelector != nil { + in, out := &in.SeedSelector, &out.SeedSelector + *out = new(SeedSelector) + (*in).DeepCopyInto(*out) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]NamedResourceReference, len(*in)) + copy(*out, *in) + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -3149,6 +3763,13 @@ func (in *ShootStateSpec) DeepCopyInto(out *ShootStateSpec) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]ResourceData, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -3201,6 +3822,11 @@ func (in *ShootStatus) DeepCopyInto(out *ShootStatus) { *out = new(string) **out = **in } + if in.ClusterIdentity != nil { + in, out := &in.ClusterIdentity, &out.ClusterIdentity + *out = new(string) + **out = **in + } return } @@ -3214,6 +3840,78 @@ func (in *ShootStatus) DeepCopy() *ShootStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Toleration) DeepCopyInto(out *Toleration) { + *out = *in + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Toleration. +func (in *Toleration) DeepCopy() *Toleration { + if in == nil { + return nil + } + out := new(Toleration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VerticalPodAutoscaler) DeepCopyInto(out *VerticalPodAutoscaler) { + *out = *in + if in.EvictAfterOOMThreshold != nil { + in, out := &in.EvictAfterOOMThreshold, &out.EvictAfterOOMThreshold + *out = new(metav1.Duration) + **out = **in + } + if in.EvictionRateBurst != nil { + in, out := &in.EvictionRateBurst, &out.EvictionRateBurst + *out = new(int32) + **out = **in + } + if in.EvictionRateLimit != nil { + in, out := &in.EvictionRateLimit, &out.EvictionRateLimit + *out = new(float64) + **out = **in + } + if in.EvictionTolerance != nil { + in, out := &in.EvictionTolerance, &out.EvictionTolerance + *out = new(float64) + **out = **in + } + if in.RecommendationMarginFraction != nil { + in, out := &in.RecommendationMarginFraction, &out.RecommendationMarginFraction + *out = new(float64) + **out = **in + } + if in.UpdaterInterval != nil { + in, out := &in.UpdaterInterval, &out.UpdaterInterval + *out = new(metav1.Duration) + **out = **in + } + if in.RecommenderInterval != nil { + in, out := &in.RecommenderInterval, &out.RecommenderInterval + *out = new(metav1.Duration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VerticalPodAutoscaler. +func (in *VerticalPodAutoscaler) DeepCopy() *VerticalPodAutoscaler { + if in == nil { + return nil + } + out := new(VerticalPodAutoscaler) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Volume) DeepCopyInto(out *Volume) { *out = *in @@ -3266,6 +3964,34 @@ func (in *VolumeType) DeepCopy() *VolumeType { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WatchCacheSizes) DeepCopyInto(out *WatchCacheSizes) { + *out = *in + if in.Default != nil { + in, out := &in.Default, &out.Default + *out = new(int32) + **out = **in + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]ResourceWatchCacheSize, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WatchCacheSizes. +func (in *WatchCacheSizes) DeepCopy() *WatchCacheSizes { + if in == nil { + return nil + } + out := new(WatchCacheSizes) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Worker) DeepCopyInto(out *Worker) { *out = *in @@ -3311,9 +4037,14 @@ func (in *Worker) DeepCopyInto(out *Worker) { } if in.ProviderConfig != nil { in, out := &in.ProviderConfig, &out.ProviderConfig - *out = new(ProviderConfig) + *out = new(runtime.RawExtension) (*in).DeepCopyInto(*out) } + if in.SystemComponents != nil { + in, out := &in.SystemComponents, &out.SystemComponents + *out = new(WorkerSystemComponents) + **out = **in + } if in.Taints != nil { in, out := &in.Taints, &out.Taints *out = make([]v1.Taint, len(*in)) @@ -3328,7 +4059,7 @@ func (in *Worker) DeepCopyInto(out *Worker) { } if in.DataVolumes != nil { in, out := &in.DataVolumes, &out.DataVolumes - *out = make([]Volume, len(*in)) + *out = make([]DataVolume, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -3343,6 +4074,11 @@ func (in *Worker) DeepCopyInto(out *Worker) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.MachineControllerManagerSettings != nil { + in, out := &in.MachineControllerManagerSettings, &out.MachineControllerManagerSettings + *out = new(MachineControllerManagerSettings) + (*in).DeepCopyInto(*out) + } return } @@ -3376,3 +4112,19 @@ func (in *WorkerKubernetes) DeepCopy() *WorkerKubernetes { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkerSystemComponents) DeepCopyInto(out *WorkerSystemComponents) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerSystemComponents. +func (in *WorkerSystemComponents) DeepCopy() *WorkerSystemComponents { + if in == nil { + return nil + } + out := new(WorkerSystemComponents) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types.go b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types.go index 9029f9402..54a896317 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types.go @@ -15,9 +15,12 @@ package v1alpha1 import ( - gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + dnsv1alpha1 "github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/sets" + "sigs.k8s.io/controller-runtime/pkg/client" + + gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1" ) // Status is the status of an Object. @@ -31,40 +34,21 @@ type Status interface { SetConditions([]gardencorev1beta1.Condition) // GetLastOperation retrieves the LastOperation of a status. // LastOperation may be nil. - GetLastOperation() LastOperation + GetLastOperation() *gardencorev1beta1.LastOperation // GetObservedGeneration retrieves the last generation observed by the extension controller. GetObservedGeneration() int64 // GetLastError retrieves the LastError of a status. // LastError may be nil. - GetLastError() LastError + GetLastError() *gardencorev1beta1.LastError // GetState retrieves the State of the extension GetState() *runtime.RawExtension -} - -// LastOperation is the last operation on an object. -type LastOperation interface { - // GetDescription returns the description of the last operation. - GetDescription() string - // GetLastUpdateTime returns the last update time of the last operation. 
- GetLastUpdateTime() metav1.Time - // GetProgress returns progress of the last operation. - GetProgress() int32 - // GetState returns the LastOperationState of the last operation. - GetState() gardencorev1beta1.LastOperationState - // GetType returns the LastOperationType of the last operation. - GetType() gardencorev1beta1.LastOperationType -} - -// LastError is the last error on an object. -type LastError interface { - // GetDescription gets the description of the last occurred error. - GetDescription() string - // GetTaskID gets the task ID of the last error. - GetTaskID() *string - // GetCodes gets the error codes of the last error. - GetCodes() []gardencorev1beta1.ErrorCode - // GetLastUpdateTime retrieves the last time the error was updated. - GetLastUpdateTime() *metav1.Time + // SetState sets the State of the extension + SetState(state *runtime.RawExtension) + // GetResources retrieves the list of named resource references referred to in the State by their names. + GetResources() []gardencorev1beta1.NamedResourceReference + // SetResources sets a list of named resource references in the Status, that are referred by + // their names in the State. + SetResources(namedResourceReferences []gardencorev1beta1.NamedResourceReference) } // Spec is the spec section of an Object. @@ -79,9 +63,24 @@ type Spec interface { // Object is an extension object resource. type Object interface { - metav1.Object - // GetExtensionStatus retrieves the object's status. - GetExtensionStatus() Status + client.Object + // GetExtensionSpec retrieves the object's spec. GetExtensionSpec() Spec + // GetExtensionStatus retrieves the object's status. + GetExtensionStatus() Status } + +// ExtensionKinds contains all supported extension kinds. +var ExtensionKinds = sets.NewString( + BackupBucketResource, + BackupEntryResource, + ContainerRuntimeResource, + ControlPlaneResource, + dnsv1alpha1.DNSProviderKind, + ExtensionResource, + InfrastructureResource, + NetworkResource, + OperatingSystemConfigResource, + WorkerResource, +) diff --git a/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_backupbucket.go b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_backupbucket.go index d5ddb39fe..b3545246f 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_backupbucket.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_backupbucket.go @@ -27,6 +27,12 @@ const BackupBucketResource = "BackupBucket" // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:resource:scope=Cluster,path=backupbuckets,shortName=bb,singular=backupbucket +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name=Type,JSONPath=".spec.type",type=string,description="The type of the cloud provider for this resource." +// +kubebuilder:printcolumn:name=Region,JSONPath=".spec.region",type=string,description="The region into which the backup bucket should be created." +// +kubebuilder:printcolumn:name=State,JSONPath=".status.lastOperation.state",type=string,description="status of the last operation, one of Aborted, Processing, Succeeded, Error, Failed" +// +kubebuilder:printcolumn:name=Age,JSONPath=".metadata.creationTimestamp",type=date,description="creation timestamp" // BackupBucket is a specification for backup bucket. 
type BackupBucket struct { @@ -34,7 +40,8 @@ type BackupBucket struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty"` - Spec BackupBucketSpec `json:"spec"` + Spec BackupBucketSpec `json:"spec"` + // +optional Status BackupBucketStatus `json:"status"` } diff --git a/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_backupentry.go b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_backupentry.go index 146ed6086..914395e87 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_backupentry.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_backupentry.go @@ -28,6 +28,13 @@ const BackupEntryResource = "BackupEntry" // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:resource:scope=Cluster,path=backupentries,shortName=be,singular=backupentry +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name=Type,JSONPath=".spec.type",type=string,description="The type of the cloud provider for this resource." +// +kubebuilder:printcolumn:name=Region,JSONPath=".spec.region",type=string,description="The region into which the backup entry should be created." +// +kubebuilder:printcolumn:name=Bucket,JSONPath=".spec.bucketName",type=string,description="The name of the bucket into which the backup entry should be created." +// +kubebuilder:printcolumn:name=State,JSONPath=".status.lastOperation.state",type=string,description="status of the last operation, one of Aborted, Processing, Succeeded, Error, Failed" +// +kubebuilder:printcolumn:name=Age,JSONPath=".metadata.creationTimestamp",type=date,description="creation timestamp" // BackupEntry is a specification for backup Entry. type BackupEntry struct { @@ -35,7 +42,8 @@ type BackupEntry struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty"` - Spec BackupEntrySpec `json:"spec"` + Spec BackupEntrySpec `json:"spec"` + // +optional Status BackupEntryStatus `json:"status"` } @@ -67,6 +75,8 @@ type BackupEntrySpec struct { DefaultSpec `json:",inline"` // BackupBucketProviderStatus contains the provider status that has // been generated by the controller responsible for the `BackupBucket` resource. + // +kubebuilder:validation:XPreserveUnknownFields + // +kubebuilder:pruning:PreserveUnknownFields // +optional BackupBucketProviderStatus *runtime.RawExtension `json:"backupBucketProviderStatus,omitempty"` // Region is the region of this Entry. diff --git a/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_cluster.go b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_cluster.go index ccfda96fa..df700279c 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_cluster.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_cluster.go @@ -25,6 +25,9 @@ const ClusterResource = "Cluster" // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:resource:scope=Cluster,path=clusters,singular=cluster +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name=Age,JSONPath=".metadata.creationTimestamp",type=date,description="creation timestamp" // Cluster is a specification for a Cluster resource. 
type Cluster struct { @@ -49,10 +52,16 @@ type ClusterList struct { type ClusterSpec struct { // CloudProfile is a raw extension field that contains the cloudprofile resource referenced // by the shoot that has to be reconciled. + // +kubebuilder:validation:XPreserveUnknownFields + // +kubebuilder:pruning:PreserveUnknownFields CloudProfile runtime.RawExtension `json:"cloudProfile"` // Seed is a raw extension field that contains the seed resource referenced by the shoot that // has to be reconciled. + // +kubebuilder:validation:XPreserveUnknownFields + // +kubebuilder:pruning:PreserveUnknownFields Seed runtime.RawExtension `json:"seed"` // Shoot is a raw extension field that contains the shoot resource that has to be reconciled. + // +kubebuilder:validation:XPreserveUnknownFields + // +kubebuilder:pruning:PreserveUnknownFields Shoot runtime.RawExtension `json:"shoot"` } diff --git a/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_containerruntime.go b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_containerruntime.go index a911a1550..7578e242b 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_containerruntime.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_containerruntime.go @@ -31,14 +31,20 @@ const ( // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:resource:scope=Namespaced,path=containerruntimes,shortName=cr,singular=containerruntime +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name=Type,JSONPath=".spec.type",type=string,description="The type of the Container Runtime resource." +// +kubebuilder:printcolumn:name=Status,JSONPath=".status.lastOperation.state",type=string,description="status of the last operation, one of Aborted, Processing, Succeeded, Error, Failed" +// +kubebuilder:printcolumn:name=Age,JSONPath=".metadata.creationTimestamp",type=date,description="creation timestamp" // ContainerRuntime is a specification for a container runtime resource. type ContainerRuntime struct { metav1.TypeMeta `json:",inline"` // +optional metav1.ObjectMeta `json:"metadata,omitempty"` - Spec ContainerRuntimeSpec `json:"spec"` - Status ContainerRuntimeStatus `json:"status"` + Spec ContainerRuntimeSpec `json:"spec"` + // +optional + Status ContainerRuntimeStatus `json:"status"` } // GetExtensionSpec implements Object. diff --git a/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_controlplane.go b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_controlplane.go index 563edbaf9..fec2ca2ea 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_controlplane.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_controlplane.go @@ -27,13 +27,20 @@ const ControlPlaneResource = "ControlPlane" // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:resource:scope=Namespaced,path=controlplanes,shortName=cp,singular=controlplane +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name=Type,JSONPath=".spec.type",type=string,description="The control plane type." +// +kubebuilder:printcolumn:name=Purpose,JSONPath=".spec.purpose",type=string,description="Purpose of control plane resource." +// +kubebuilder:printcolumn:name=Status,JSONPath=".status.lastOperation.state",type=string,description="Status of control plane resource." 
+// +kubebuilder:printcolumn:name=Age,JSONPath=".metadata.creationTimestamp",type=date,description="creation timestamp" // ControlPlane is a specification for a ControlPlane resource. type ControlPlane struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` - Spec ControlPlaneSpec `json:"spec"` + Spec ControlPlaneSpec `json:"spec"` + // +optional Status ControlPlaneStatus `json:"status"` } @@ -72,6 +79,8 @@ type ControlPlaneSpec struct { Purpose *Purpose `json:"purpose,omitempty"` // InfrastructureProviderStatus contains the provider status that has // been generated by the controller responsible for the `Infrastructure` resource. + // +kubebuilder:validation:XPreserveUnknownFields + // +kubebuilder:pruning:PreserveUnknownFields // +optional InfrastructureProviderStatus *runtime.RawExtension `json:"infrastructureProviderStatus,omitempty"` // Region is the region of this control plane. diff --git a/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_defaults.go b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_defaults.go index d0ad80e42..a6339036e 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_defaults.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_defaults.go @@ -24,8 +24,9 @@ type DefaultSpec struct { // Type contains the instance of the resource's kind. Type string `json:"type"` // ProviderConfig is the provider specific configuration. + // +kubebuilder:validation:XPreserveUnknownFields + // +kubebuilder:pruning:PreserveUnknownFields // +optional - ProviderConfig *runtime.RawExtension `json:"providerConfig,omitempty"` } @@ -47,6 +48,8 @@ func (d *DefaultSpec) GetProviderConfig() *runtime.RawExtension { // DefaultStatus contains common status fields for every extension resource. type DefaultStatus struct { // ProviderStatus contains provider-specific status. + // +kubebuilder:validation:XPreserveUnknownFields + // +kubebuilder:pruning:PreserveUnknownFields // +optional ProviderStatus *runtime.RawExtension `json:"providerStatus,omitempty"` // Conditions represents the latest available observations of a Seed's current state. @@ -61,8 +64,13 @@ type DefaultStatus struct { // ObservedGeneration is the most recent generation observed for this resource. ObservedGeneration int64 `json:"observedGeneration,omitempty"` // State can be filled by the operating controller with what ever data it needs. + // +kubebuilder:validation:XPreserveUnknownFields + // +kubebuilder:pruning:PreserveUnknownFields // +optional State *runtime.RawExtension `json:"state,omitempty"` + // Resources holds a list of named resource references that can be referred to in the state by their names. + // +optional + Resources []gardencorev1beta1.NamedResourceReference `json:"resources,omitempty"` } // GetProviderStatus implements Status. @@ -81,18 +89,12 @@ func (d *DefaultStatus) SetConditions(c []gardencorev1beta1.Condition) { } // GetLastOperation implements Status. -func (d *DefaultStatus) GetLastOperation() LastOperation { - if d.LastOperation == nil { - return nil - } +func (d *DefaultStatus) GetLastOperation() *gardencorev1beta1.LastOperation { return d.LastOperation } // GetLastError implements Status. 
-func (d *DefaultStatus) GetLastError() LastError { - if d.LastError == nil { - return nil - } +func (d *DefaultStatus) GetLastError() *gardencorev1beta1.LastError { return d.LastError } @@ -105,3 +107,18 @@ func (d *DefaultStatus) GetObservedGeneration() int64 { func (d *DefaultStatus) GetState() *runtime.RawExtension { return d.State } + +// SetState implements Status. +func (d *DefaultStatus) SetState(state *runtime.RawExtension) { + d.State = state +} + +// GetResources implements Status. +func (d *DefaultStatus) GetResources() []gardencorev1beta1.NamedResourceReference { + return d.Resources +} + +// SetResources implements Status. +func (d *DefaultStatus) SetResources(namedResourceReference []gardencorev1beta1.NamedResourceReference) { + d.Resources = namedResourceReference +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_extension.go b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_extension.go index 94953e470..26744ee78 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_extension.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_extension.go @@ -25,6 +25,11 @@ const ExtensionResource = "Extension" // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:resource:scope=Namespaced,path=extensions,shortName=ext,singular=extension +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name=Type,JSONPath=".spec.type",type=string,description="The type of the Extension resource." +// +kubebuilder:printcolumn:name=Status,JSONPath=".status.lastOperation.state",type=string,description="Status of Extension resource." +// +kubebuilder:printcolumn:name=Age,JSONPath=".metadata.creationTimestamp",type=date,description="creation timestamp" // Extension is a specification for a Extension resource. type Extension struct { @@ -32,7 +37,8 @@ type Extension struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty"` - Spec ExtensionSpec `json:"spec"` + Spec ExtensionSpec `json:"spec"` + // +optional Status ExtensionStatus `json:"status"` } diff --git a/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_infrastructure.go b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_infrastructure.go index 7e514a3ff..d568a4782 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_infrastructure.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_infrastructure.go @@ -26,6 +26,12 @@ const InfrastructureResource = "Infrastructure" // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:resource:scope=Namespaced,path=infrastructures,shortName=infra,singular=infrastructure +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name=Type,JSONPath=".spec.type",type=string,description="The type of the cloud provider for this resource." +// +kubebuilder:printcolumn:name=Region,JSONPath=".spec.region",type=string,description="The region into which the infrastructure should be deployed." +// +kubebuilder:printcolumn:name=Status,JSONPath=".status.lastOperation.state",type=string,description="Status of infrastructure resource." +// +kubebuilder:printcolumn:name=Age,JSONPath=".metadata.creationTimestamp",type=date,description="creation timestamp" // Infrastructure is a specification for cloud provider infrastructure. 
type Infrastructure struct { @@ -33,7 +39,8 @@ type Infrastructure struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty"` - Spec InfrastructureSpec `json:"spec"` + Spec InfrastructureSpec `json:"spec"` + // +optional Status InfrastructureStatus `json:"status"` } diff --git a/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_network.go b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_network.go index 0888e9417..f47bce118 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_network.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_network.go @@ -25,6 +25,13 @@ const NetworkResource = "Network" // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:resource:scope=Namespaced,path=networks,singular=network +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name=Type,JSONPath=".spec.type",type=string,description="The type of the network provider for this resource." +// +kubebuilder:printcolumn:name=Pod CIDR,JSONPath=".spec.podCIDR",type=string,description="The CIDR that will be used for pods." +// +kubebuilder:printcolumn:name=Service CIDR,JSONPath=".spec.serviceCIDR",type=string,description="The CIDR that will be used for services." +// +kubebuilder:printcolumn:name=Status,JSONPath=".status.lastOperation.state",type=string,description="Status of network resource." +// +kubebuilder:printcolumn:name=Age,JSONPath=".metadata.creationTimestamp",type=date,description="creation timestamp" // Network is the specification for cluster networking. type Network struct { @@ -32,18 +39,19 @@ type Network struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty"` - Spec NetworkSpec `json:"spec"` + Spec NetworkSpec `json:"spec"` + // +optional Status NetworkStatus `json:"status"` } // GetExtensionSpec implements Object. -func (i *Network) GetExtensionSpec() Spec { - return &i.Spec +func (n *Network) GetExtensionSpec() Spec { + return &n.Spec } // GetExtensionStatus implements Object. -func (i *Network) GetExtensionStatus() Status { - return &i.Status +func (n *Network) GetExtensionStatus() Status { + return &n.Status } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_operatingsystemconfig.go b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_operatingsystemconfig.go index ecbda5780..021a782e6 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_operatingsystemconfig.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_operatingsystemconfig.go @@ -26,6 +26,12 @@ const OperatingSystemConfigResource = "OperatingSystemConfig" // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:resource:scope=Namespaced,path=operatingsystemconfigs,shortName=osc,singular=operatingsystemconfig +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name=Type,JSONPath=".spec.type",type=string,description="The type of the operating system configuration." +// +kubebuilder:printcolumn:name=Purpose,JSONPath=".spec.purpose",type=string,description="The purpose of the operating system configuration." +// +kubebuilder:printcolumn:name=Status,JSONPath=".status.lastOperation.state",type=string,description="Status of operating system configuration." 
+// +kubebuilder:printcolumn:name=Age,JSONPath=".metadata.creationTimestamp",type=date,description="creation timestamp" // OperatingSystemConfig is a specification for a OperatingSystemConfig resource type OperatingSystemConfig struct { @@ -33,7 +39,8 @@ type OperatingSystemConfig struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty"` - Spec OperatingSystemConfigSpec `json:"spec"` + Spec OperatingSystemConfigSpec `json:"spec"` + // +optional Status OperatingSystemConfigStatus `json:"status"` } @@ -216,7 +223,9 @@ type CRIName string const ( // CRINameContainerD is a constant for ContainerD CRI name - CRINameContainerD = "containerd" + CRINameContainerD CRIName = "containerd" + // CRINameDocker is a constant for Docker CRI name + CRINameDocker CRIName = "docker" ) // ContainerDRuntimeContainersBinFolder is the folder where Container Runtime binaries should be saved for ContainerD usage diff --git a/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_worker.go b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_worker.go index 014b51223..95a299ecd 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_worker.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_worker.go @@ -15,6 +15,8 @@ package v1alpha1 import ( + gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -28,6 +30,12 @@ const WorkerResource = "Worker" // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:resource:scope=Namespaced,path=workers,singular=worker +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name=Type,JSONPath=".spec.type",type=string,description="The type of the cloud provider for this resource." +// +kubebuilder:printcolumn:name=Region,JSONPath=".spec.region",type=string,description="The region into which the worker should be deployed." +// +kubebuilder:printcolumn:name=Status,JSONPath=".status.lastOperation.state",type=string,description="Status of the worker." +// +kubebuilder:printcolumn:name=Age,JSONPath=".metadata.creationTimestamp",type=date,description="creation timestamp" // Worker is a specification for a Worker resource. type Worker struct { @@ -35,7 +43,8 @@ type Worker struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty"` - Spec WorkerSpec `json:"spec"` + Spec WorkerSpec `json:"spec"` + // +optional Status WorkerStatus `json:"status"` } @@ -68,6 +77,8 @@ type WorkerSpec struct { // InfrastructureProviderStatus is a raw extension field that contains the provider status that has // been generated by the controller responsible for the `Infrastructure` resource. + // +kubebuilder:validation:XPreserveUnknownFields + // +kubebuilder:pruning:PreserveUnknownFields // +optional InfrastructureProviderStatus *runtime.RawExtension `json:"infrastructureProviderStatus,omitempty"` // Region is the name of the region where the worker pool should be deployed to. @@ -111,6 +122,8 @@ type WorkerPool struct { // Name is the name of this worker pool. Name string `json:"name"` // ProviderConfig is a provider specific configuration for the worker pool. 
+ // +kubebuilder:validation:XPreserveUnknownFields + // +kubebuilder:pruning:PreserveUnknownFields // +optional ProviderConfig *runtime.RawExtension `json:"providerConfig,omitempty"` // UserData is a base64-encoded string that contains the data that is sent to the provider's APIs @@ -121,13 +134,16 @@ type WorkerPool struct { Volume *Volume `json:"volume,omitempty"` // DataVolumes contains a list of additional worker volumes. // +optional - DataVolumes []Volume `json:"dataVolumes,omitempty"` + DataVolumes []DataVolume `json:"dataVolumes,omitempty"` // KubeletDataVolumeName contains the name of a dataVolume that should be used for storing kubelet state. // +optional KubeletDataVolumeName *string `json:"kubeletDataVolumeName,omitempty"` // Zones contains information about availability zones for this worker pool. // +optional Zones []string `json:"zones,omitempty"` + // MachineControllerManagerSettings contains configurations for different worker-pools. Eg. MachineDrainTimeout, MachineHealthTimeout. + // +optional + MachineControllerManagerSettings *gardencorev1beta1.MachineControllerManagerSettings `json:"machineControllerManager,omitempty"` } // MachineImage contains logical information about the name and the version of the machie image that @@ -155,6 +171,20 @@ type Volume struct { Encrypted *bool `json:"encrypted,omitempty"` } +// DataVolume contains information about a data volume. +type DataVolume struct { + // Name of the volume to make it referencable. + Name string `json:"name"` + // Type is the type of the volume. + // +optional + Type *string `json:"type,omitempty"` + // Size is the of the root volume. + Size string `json:"size"` + // Encrypted determines if the volume should be encrypted. + // +optional + Encrypted *bool `json:"encrypted,omitempty"` +} + // WorkerStatus is the status for a Worker resource. type WorkerStatus struct { // DefaultStatus is a structure containing common fields used by all extension resources. @@ -175,3 +205,6 @@ type MachineDeployment struct { // Maximum is the maximum number for this machine deployment. Maximum int32 `json:"maximum"` } + +// WorkerRollingUpdate is a constant for a condition type indicating a rolling update for any worker pool of the Shoot. +const WorkerRollingUpdate gardencorev1beta1.ConditionType = "RollingUpdate" diff --git a/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/zz_generated.deepcopy.go index 379007bf2..51466ea0f 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -559,6 +559,32 @@ func (in *ControlPlaneStatus) DeepCopy() *ControlPlaneStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataVolume) DeepCopyInto(out *DataVolume) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataVolume. +func (in *DataVolume) DeepCopy() *DataVolume { + if in == nil { + return nil + } + out := new(DataVolume) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DefaultSpec) DeepCopyInto(out *DefaultSpec) { *out = *in @@ -610,6 +636,11 @@ func (in *DefaultStatus) DeepCopyInto(out *DefaultStatus) { *out = new(runtime.RawExtension) (*in).DeepCopyInto(*out) } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]v1beta1.NamedResourceReference, len(*in)) + copy(*out, *in) + } return } @@ -1353,7 +1384,7 @@ func (in *WorkerPool) DeepCopyInto(out *WorkerPool) { } if in.DataVolumes != nil { in, out := &in.DataVolumes, &out.DataVolumes - *out = make([]Volume, len(*in)) + *out = make([]DataVolume, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -1368,6 +1399,11 @@ func (in *WorkerPool) DeepCopyInto(out *WorkerPool) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.MachineControllerManagerSettings != nil { + in, out := &in.MachineControllerManagerSettings, &out.MachineControllerManagerSettings + *out = new(v1beta1.MachineControllerManagerSettings) + (*in).DeepCopyInto(*out) + } return } diff --git a/vendor/github.com/gardener/gardener/pkg/apis/seedmanagement/doc.go b/vendor/github.com/gardener/gardener/pkg/apis/seedmanagement/doc.go new file mode 100644 index 000000000..7c323f911 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/seedmanagement/doc.go @@ -0,0 +1,19 @@ +// Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +k8s:deepcopy-gen=package + +// Package seedmanagement is the internal version of the API. +// +groupName=seedmanagement.gardener.cloud +package seedmanagement diff --git a/vendor/github.com/gardener/gardener/pkg/apis/seedmanagement/field_constants.go b/vendor/github.com/gardener/gardener/pkg/apis/seedmanagement/field_constants.go new file mode 100644 index 000000000..f9f7cff12 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/seedmanagement/field_constants.go @@ -0,0 +1,23 @@ +// Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package seedmanagement + +// Field path constants that are specific to the internal API +// representation. +const ( + // ManagedSeedShootName is the field selector path for finding + // the Shoot of a seedmanagement.gardener.cloud/v1alpha1 ManagedSeed. + ManagedSeedShootName = "spec.shoot.name" +) diff --git a/vendor/github.com/gardener/gardener/pkg/apis/seedmanagement/helper/helper.go b/vendor/github.com/gardener/gardener/pkg/apis/seedmanagement/helper/helper.go new file mode 100644 index 000000000..8794e7254 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/seedmanagement/helper/helper.go @@ -0,0 +1,85 @@ +// Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package helper + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/gardener/gardener/pkg/apis/core" + corev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1" + "github.com/gardener/gardener/pkg/apis/seedmanagement" + "github.com/gardener/gardener/pkg/gardenlet/apis/config" + confighelper "github.com/gardener/gardener/pkg/gardenlet/apis/config/helper" + configv1alpha1 "github.com/gardener/gardener/pkg/gardenlet/apis/config/v1alpha1" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/runtime/serializer/versioning" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var scheme *runtime.Scheme + +func init() { + scheme = runtime.NewScheme() + // core schemes are needed here to properly decode the embedded SeedTemplate objects + utilruntime.Must(core.AddToScheme(scheme)) + utilruntime.Must(corev1beta1.AddToScheme(scheme)) + utilruntime.Must(config.AddToScheme(scheme)) + utilruntime.Must(configv1alpha1.AddToScheme(scheme)) +} + +// DecodeGardenletConfiguration decodes the given raw extension into an internal GardenletConfiguration version. +func DecodeGardenletConfiguration(rawConfig *runtime.RawExtension, withDefaults bool) (*config.GardenletConfiguration, error) { + cfg, err := DecodeGardenletConfigurationExternal(rawConfig, withDefaults) + if err != nil { + return nil, err + } + return confighelper.ConvertGardenletConfiguration(cfg) +} + +// DecodeGardenletConfigurationExternal decodes the given raw extension into an external GardenletConfiguration version. 
+func DecodeGardenletConfigurationExternal(rawConfig *runtime.RawExtension, withDefaults bool) (*configv1alpha1.GardenletConfiguration, error) { + if cfg, ok := rawConfig.Object.(*configv1alpha1.GardenletConfiguration); ok { + return cfg, nil + } + cfg := &configv1alpha1.GardenletConfiguration{ + TypeMeta: metav1.TypeMeta{ + APIVersion: configv1alpha1.SchemeGroupVersion.String(), + Kind: "GardenletConfiguration", + }, + } + if _, _, err := getDecoder(withDefaults).Decode(rawConfig.Raw, nil, cfg); err != nil { + return nil, err + } + return cfg, nil +} + +func getDecoder(withDefaulter bool) runtime.Decoder { + if withDefaulter { + return serializer.NewCodecFactory(scheme).UniversalDecoder() + } + return versioning.NewCodec(nil, serializer.NewCodecFactory(scheme).UniversalDeserializer(), runtime.UnsafeObjectConvertor(scheme), + scheme, scheme, nil, runtime.DisabledGroupVersioner, runtime.InternalGroupVersioner, scheme.Name()) + +} + +// GetBootstrap returns the value of the given Bootstrap, or None if nil. +func GetBootstrap(bootstrap *seedmanagement.Bootstrap) seedmanagement.Bootstrap { + if bootstrap != nil { + return *bootstrap + } + return seedmanagement.BootstrapNone +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/seedmanagement/register.go b/vendor/github.com/gardener/gardener/pkg/apis/seedmanagement/register.go new file mode 100644 index 000000000..6bf928ab3 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/seedmanagement/register.go @@ -0,0 +1,52 @@ +// Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package seedmanagement + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the name of the seedmanagement API group. +const GroupName = "seedmanagement.gardener.cloud" + +// SchemeGroupVersion is group version used to register these objects. +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns back a Group qualified GroupKind. +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns back a Group qualified GroupResource. +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder is a new Scheme Builder which registers our API. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // AddToScheme is a reference to the Scheme Builder's AddToScheme function. + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. 
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &ManagedSeed{}, + &ManagedSeedList{}, + ) + return nil +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/seedmanagement/types_managedseed.go b/vendor/github.com/gardener/gardener/pkg/apis/seedmanagement/types_managedseed.go new file mode 100644 index 000000000..3e63dba6a --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/seedmanagement/types_managedseed.go @@ -0,0 +1,153 @@ +// Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package seedmanagement + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + + gardencore "github.com/gardener/gardener/pkg/apis/core" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ManagedSeed represents a Shoot that is registered as Seed. +type ManagedSeed struct { + metav1.TypeMeta + // Standard object metadata. + metav1.ObjectMeta + // Specification of the ManagedSeed. + Spec ManagedSeedSpec + // Most recently observed status of the ManagedSeed. + Status ManagedSeedStatus +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ManagedSeedList is a list of ManagedSeed objects. +type ManagedSeedList struct { + metav1.TypeMeta + // Standard list object metadata. + metav1.ListMeta + // Items is the list of ManagedSeeds. + Items []ManagedSeed +} + +// ManagedSeedSpec is the specification of a ManagedSeed. +type ManagedSeedSpec struct { + // Shoot references a Shoot that should be registered as Seed. + Shoot Shoot + // SeedTemplate is a template for a Seed object, that should be used to register a given cluster as a Seed. + // Either SeedTemplate or Gardenlet must be specified. When Seed is specified, the ManagedSeed controller will not deploy a gardenlet into the cluster + // and an existing gardenlet reconciling the new Seed is required. + SeedTemplate *gardencore.SeedTemplate + // Gardenlet specifies that the ManagedSeed controller should deploy a gardenlet into the cluster + // with the given deployment parameters and GardenletConfiguration. + Gardenlet *Gardenlet +} + +// Shoot identifies the Shoot that should be registered as Seed. +type Shoot struct { + // Name is the name of the Shoot that will be registered as Seed. + Name string +} + +// Gardenlet specifies gardenlet deployment parameters and the GardenletConfiguration used to configure gardenlet. +type Gardenlet struct { + // Deployment specifies certain gardenlet deployment parameters, such as the number of replicas, + // the image, etc. + Deployment *GardenletDeployment + // Config is the GardenletConfiguration used to configure gardenlet. 
+ Config runtime.Object + // Bootstrap is the mechanism that should be used for bootstrapping gardenlet connection to the Garden cluster. One of ServiceAccount, BootstrapToken, None. + // If set to ServiceAccount or BootstrapToken, a service account or a bootstrap token will be created in the garden cluster and used to compute the bootstrap kubeconfig. + // If set to None, the gardenClientConnection.kubeconfig field will be used to connect to the Garden cluster. Defaults to BootstrapToken. + Bootstrap *Bootstrap + // MergeWithParent specifies whether the deployment parameters and GardenletConfiguration of the parent gardenlet + // should be merged with the specified deployment parameters and GardenletConfiguration. Defaults to true. + MergeWithParent *bool +} + +// GardenletDeployment specifies certain gardenlet deployment parameters, such as the number of replicas, +// the image, etc. +type GardenletDeployment struct { + // ReplicaCount is the number of gardenlet replicas. Defaults to 1. + ReplicaCount *int32 + // RevisionHistoryLimit is the number of old gardenlet ReplicaSets to retain to allow rollback. Defaults to 1. + RevisionHistoryLimit *int32 + // ServiceAccountName is the name of the ServiceAccount to use to run gardenlet pods. + ServiceAccountName *string + // Image is the gardenlet container image. + Image *Image + // Resources are the compute resources required by the gardenlet container. + Resources *corev1.ResourceRequirements + // PodLabels are the labels on gardenlet pods. + PodLabels map[string]string + // PodAnnotations are the annotations on gardenlet pods. + PodAnnotations map[string]string + // AdditionalVolumes is the list of additional volumes that should be mounted by gardenlet containers. + AdditionalVolumes []corev1.Volume + // AdditionalVolumeMounts is the list of additional pod volumes to mount into the gardenlet container's filesystem. + AdditionalVolumeMounts []corev1.VolumeMount + // Env is the list of environment variables to set in the gardenlet container. + Env []corev1.EnvVar + // VPA specifies whether to enable VPA for gardenlet. Defaults to true. + VPA *bool +} + +// Image specifies container image parameters. +type Image struct { + // Repository is the image repository. + Repository *string + // Tag is the image tag. + Tag *string + // PullPolicy is the image pull policy. One of Always, Never, IfNotPresent. + // Defaults to Always if latest tag is specified, or IfNotPresent otherwise. + PullPolicy *corev1.PullPolicy +} + +// Bootstrap describes a mechanism for bootstrapping gardenlet connection to the Garden cluster. +type Bootstrap string + +const ( + // BootstrapServiceAccount means that a temporary service account should be used for bootstrapping gardenlet connection to the Garden cluster. + BootstrapServiceAccount Bootstrap = "ServiceAccount" + // BootstrapToken means that a bootstrap token should be used for bootstrapping gardenlet connection to the Garden cluster. + BootstrapToken Bootstrap = "BootstrapToken" + // BootstrapNone means that gardenlet connection to the Garden cluster should not be bootstrapped + // and the gardenClientConnection.kubeconfig field should be used to connect to the Garden cluster. + BootstrapNone Bootstrap = "None" +) + +// ManagedSeedStatus is the status of a ManagedSeed. +type ManagedSeedStatus struct { + // Conditions represents the latest available observations of a ManagedSeed's current state. + Conditions []gardencore.Condition + // ObservedGeneration is the most recent generation observed for this ManagedSeed. 
It corresponds to the + // ManagedSeed's generation, which is updated on mutation by the API Server. + ObservedGeneration int64 +} + +const ( + // ManagedSeedShootExists is a condition type for indicating whether the ManagedSeed's shoot exists. + ManagedSeedShootExists gardencore.ConditionType = "ShootExists" + // ManagedSeedShootReconciled is a condition type for indicating whether the ManagedSeed's shoot has been reconciled. + ManagedSeedShootReconciled gardencore.ConditionType = "ShootReconciled" + // ManagedSeedSeedRegistered is a condition type for indicating whether the ManagedSeed's seed has been registered, + // either directly or by deploying gardenlet into the shoot. + ManagedSeedSeedRegistered gardencore.ConditionType = "SeedRegistered" +) diff --git a/vendor/github.com/gardener/gardener/pkg/apis/seedmanagement/v1alpha1/conversions.go b/vendor/github.com/gardener/gardener/pkg/apis/seedmanagement/v1alpha1/conversions.go new file mode 100644 index 000000000..7a9c33b9c --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/seedmanagement/v1alpha1/conversions.go @@ -0,0 +1,58 @@ +// Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + "fmt" + + "github.com/gardener/gardener/pkg/apis/seedmanagement" + + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime" +) + +func addConversionFuncs(scheme *runtime.Scheme) error { + if err := scheme.AddFieldLabelConversionFunc( + SchemeGroupVersion.WithKind("ManagedSeed"), + func(label, value string) (string, string, error) { + switch label { + case "metadata.name", "metadata.namespace", seedmanagement.ManagedSeedShootName: + return label, value, nil + default: + return "", "", fmt.Errorf("field label not supported: %s", label) + } + }, + ); err != nil { + return err + } + + return nil +} + +func Convert_v1alpha1_Gardenlet_To_seedmanagement_Gardenlet(in *Gardenlet, out *seedmanagement.Gardenlet, s conversion.Scope) error { + if err := autoConvert_v1alpha1_Gardenlet_To_seedmanagement_Gardenlet(in, out, s); err != nil { + return err + } + out.Config = in.Config.Object + return nil +} + +func Convert_seedmanagement_Gardenlet_To_v1alpha1_Gardenlet(in *seedmanagement.Gardenlet, out *Gardenlet, s conversion.Scope) error { + if err := autoConvert_seedmanagement_Gardenlet_To_v1alpha1_Gardenlet(in, out, s); err != nil { + return err + } + out.Config = &runtime.RawExtension{Object: in.Config} + return nil +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/seedmanagement/v1alpha1/defaults.go b/vendor/github.com/gardener/gardener/pkg/apis/seedmanagement/v1alpha1/defaults.go new file mode 100644 index 000000000..aef72db9a --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/seedmanagement/v1alpha1/defaults.go @@ -0,0 +1,147 @@ +// Copyright (c) 2021 SAP SE or an SAP affiliate company. 
All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + "fmt" + + gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1" + "github.com/gardener/gardener/pkg/apis/seedmanagement/helper" + configv1alpha1 "github.com/gardener/gardener/pkg/gardenlet/apis/config/v1alpha1" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/utils/pointer" +) + +func addDefaultingFuncs(scheme *runtime.Scheme) error { + return RegisterDefaults(scheme) +} + +// SetDefaults_ManagedSeed sets default values for ManagedSeed objects. +func SetDefaults_ManagedSeed(obj *ManagedSeed) { + switch { + case obj.Spec.SeedTemplate != nil: + setDefaultsSeedSpec(&obj.Spec.SeedTemplate.Spec, obj.Name, obj.Namespace, true) + case obj.Spec.Gardenlet != nil: + setDefaultsGardenlet(obj.Spec.Gardenlet, obj.Name, obj.Namespace) + } +} + +// SetDefaults_GardenletDeployment sets default values for GardenletDeployment objects. +func SetDefaults_GardenletDeployment(obj *GardenletDeployment) { + // Set default replica count + if obj.ReplicaCount == nil { + obj.ReplicaCount = pointer.Int32Ptr(1) + } + + // Set default revision history limit + if obj.RevisionHistoryLimit == nil { + obj.RevisionHistoryLimit = pointer.Int32Ptr(1) + } + + // Set default image + if obj.Image == nil { + obj.Image = &Image{} + } + + // Set default VPA + if obj.VPA == nil { + obj.VPA = pointer.BoolPtr(true) + } +} + +// SetDefaults_Image sets default values for Image objects. 
+func SetDefaults_Image(obj *Image) { + // Set default pull policy + if obj.PullPolicy == nil { + var pullPolicy corev1.PullPolicy + if obj.Tag != nil && *obj.Tag == "latest" { + pullPolicy = corev1.PullAlways + } else { + pullPolicy = corev1.PullIfNotPresent + } + obj.PullPolicy = &pullPolicy + } +} + +func setDefaultsGardenlet(obj *Gardenlet, name, namespace string) { + // Set deployment defaults + if obj.Deployment == nil { + obj.Deployment = &GardenletDeployment{} + } + + // Decode / initialize gardenlet config + var gardenletConfig *configv1alpha1.GardenletConfiguration + if obj.Config != nil { + // Decode gardenlet config to an external version + // Without defaults, since we don't want to set gardenlet config defaults in the resource at this point + // Ignoring errors, since this package does not support proper error reporting + // and if we just return here obj.Config will remain uninitialized, resulting in no error message but + // wrong behavior later on + gardenletConfig, _ = helper.DecodeGardenletConfigurationExternal(obj.Config, false) + } + if gardenletConfig == nil { + gardenletConfig = &configv1alpha1.GardenletConfiguration{ + TypeMeta: metav1.TypeMeta{ + APIVersion: configv1alpha1.SchemeGroupVersion.String(), + Kind: "GardenletConfiguration", + }, + } + } + + // Set gardenlet config defaults + setDefaultsGardenletConfiguration(gardenletConfig, name, namespace) + + // Set gardenlet config back to obj.Config + obj.Config = &runtime.RawExtension{Object: gardenletConfig} + + // Set default garden connection bootstrap + if obj.Bootstrap == nil { + gardenConnectionBootstrap := BootstrapToken + obj.Bootstrap = &gardenConnectionBootstrap + } + + // Set default merge with parent + if obj.MergeWithParent == nil { + obj.MergeWithParent = pointer.BoolPtr(true) + } +} + +func setDefaultsGardenletConfiguration(obj *configv1alpha1.GardenletConfiguration, name, namespace string) { + // Initialize seed config + if obj.SeedConfig == nil { + obj.SeedConfig = &configv1alpha1.SeedConfig{} + } + + // Set seed spec defaults + setDefaultsSeedSpec(&obj.SeedConfig.SeedTemplate.Spec, name, namespace, false) +} + +func setDefaultsSeedSpec(spec *gardencorev1beta1.SeedSpec, name, namespace string, withSecretRef bool) { + if spec.SecretRef == nil && withSecretRef { + spec.SecretRef = &corev1.SecretReference{ + Name: fmt.Sprintf("seed-%s", name), + Namespace: namespace, + } + } + if spec.Backup != nil && spec.Backup.SecretRef == (corev1.SecretReference{}) { + spec.Backup.SecretRef = corev1.SecretReference{ + Name: fmt.Sprintf("backup-%s", name), + Namespace: namespace, + } + } +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/seedmanagement/v1alpha1/doc.go b/vendor/github.com/gardener/gardener/pkg/apis/seedmanagement/v1alpha1/doc.go new file mode 100644 index 000000000..d5b1cb98e --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/seedmanagement/v1alpha1/doc.go @@ -0,0 +1,26 @@ +// Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package v1alpha1 is the v1alpha1 version of the API. +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=github.com/gardener/gardener/pkg/apis/seedmanagement +// +k8s:openapi-gen=true +// +k8s:defaulter-gen=TypeMeta +// +k8s:protobuf-gen=package + +//go:generate gen-crd-api-reference-docs -api-dir . -config ../../../../hack/api-reference/seedmanagement-config.json -template-dir ../../../../hack/api-reference/template -out-file ../../../../hack/api-reference/seedmanagement.md + +// Package v1alpha1 is a version of the API. +// +groupName=seedmanagement.gardener.cloud +package v1alpha1 diff --git a/vendor/github.com/gardener/gardener/pkg/apis/seedmanagement/v1alpha1/generated.pb.go b/vendor/github.com/gardener/gardener/pkg/apis/seedmanagement/v1alpha1/generated.pb.go new file mode 100644 index 000000000..c30a7d578 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/seedmanagement/v1alpha1/generated.pb.go @@ -0,0 +1,2836 @@ +/* +Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/gardener/gardener/pkg/apis/seedmanagement/v1alpha1/generated.proto + +package v1alpha1 + +import ( + fmt "fmt" + + v1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *Gardenlet) Reset() { *m = Gardenlet{} } +func (*Gardenlet) ProtoMessage() {} +func (*Gardenlet) Descriptor() ([]byte, []int) { + return fileDescriptor_d64c05a219673fe5, []int{0} +} +func (m *Gardenlet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Gardenlet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Gardenlet) XXX_Merge(src proto.Message) { + xxx_messageInfo_Gardenlet.Merge(m, src) +} +func (m *Gardenlet) XXX_Size() int { + return m.Size() +} +func (m *Gardenlet) XXX_DiscardUnknown() { + xxx_messageInfo_Gardenlet.DiscardUnknown(m) +} + +var xxx_messageInfo_Gardenlet proto.InternalMessageInfo + +func (m *GardenletDeployment) Reset() { *m = GardenletDeployment{} } +func (*GardenletDeployment) ProtoMessage() {} +func (*GardenletDeployment) Descriptor() ([]byte, []int) { + return fileDescriptor_d64c05a219673fe5, []int{1} +} +func (m *GardenletDeployment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GardenletDeployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GardenletDeployment) XXX_Merge(src proto.Message) { + xxx_messageInfo_GardenletDeployment.Merge(m, src) +} +func (m *GardenletDeployment) XXX_Size() int { + return m.Size() +} +func (m *GardenletDeployment) XXX_DiscardUnknown() { + xxx_messageInfo_GardenletDeployment.DiscardUnknown(m) +} + +var xxx_messageInfo_GardenletDeployment proto.InternalMessageInfo + +func (m *Image) Reset() { *m = Image{} } +func (*Image) ProtoMessage() {} +func (*Image) Descriptor() ([]byte, []int) { + return fileDescriptor_d64c05a219673fe5, []int{2} +} +func (m *Image) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Image) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Image) XXX_Merge(src proto.Message) { + xxx_messageInfo_Image.Merge(m, src) +} +func (m *Image) XXX_Size() int { + return m.Size() +} +func (m *Image) XXX_DiscardUnknown() { + xxx_messageInfo_Image.DiscardUnknown(m) +} + +var xxx_messageInfo_Image proto.InternalMessageInfo + +func (m *ManagedSeed) Reset() { *m = ManagedSeed{} } +func (*ManagedSeed) ProtoMessage() {} +func (*ManagedSeed) Descriptor() ([]byte, []int) { + return fileDescriptor_d64c05a219673fe5, []int{3} +} +func (m *ManagedSeed) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ManagedSeed) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ManagedSeed) XXX_Merge(src proto.Message) { + xxx_messageInfo_ManagedSeed.Merge(m, src) +} +func (m *ManagedSeed) XXX_Size() int { + return m.Size() +} +func (m *ManagedSeed) XXX_DiscardUnknown() { + xxx_messageInfo_ManagedSeed.DiscardUnknown(m) +} + +var xxx_messageInfo_ManagedSeed proto.InternalMessageInfo + +func (m *ManagedSeedList) Reset() { *m = ManagedSeedList{} } +func (*ManagedSeedList) ProtoMessage() {} +func (*ManagedSeedList) Descriptor() ([]byte, []int) { + return fileDescriptor_d64c05a219673fe5, []int{4} +} +func (m *ManagedSeedList) 
XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ManagedSeedList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ManagedSeedList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ManagedSeedList.Merge(m, src) +} +func (m *ManagedSeedList) XXX_Size() int { + return m.Size() +} +func (m *ManagedSeedList) XXX_DiscardUnknown() { + xxx_messageInfo_ManagedSeedList.DiscardUnknown(m) +} + +var xxx_messageInfo_ManagedSeedList proto.InternalMessageInfo + +func (m *ManagedSeedSpec) Reset() { *m = ManagedSeedSpec{} } +func (*ManagedSeedSpec) ProtoMessage() {} +func (*ManagedSeedSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_d64c05a219673fe5, []int{5} +} +func (m *ManagedSeedSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ManagedSeedSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ManagedSeedSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ManagedSeedSpec.Merge(m, src) +} +func (m *ManagedSeedSpec) XXX_Size() int { + return m.Size() +} +func (m *ManagedSeedSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ManagedSeedSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ManagedSeedSpec proto.InternalMessageInfo + +func (m *ManagedSeedStatus) Reset() { *m = ManagedSeedStatus{} } +func (*ManagedSeedStatus) ProtoMessage() {} +func (*ManagedSeedStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_d64c05a219673fe5, []int{6} +} +func (m *ManagedSeedStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ManagedSeedStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ManagedSeedStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ManagedSeedStatus.Merge(m, src) +} +func (m *ManagedSeedStatus) XXX_Size() int { + return m.Size() +} +func (m *ManagedSeedStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ManagedSeedStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ManagedSeedStatus proto.InternalMessageInfo + +func (m *Shoot) Reset() { *m = Shoot{} } +func (*Shoot) ProtoMessage() {} +func (*Shoot) Descriptor() ([]byte, []int) { + return fileDescriptor_d64c05a219673fe5, []int{7} +} +func (m *Shoot) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Shoot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Shoot) XXX_Merge(src proto.Message) { + xxx_messageInfo_Shoot.Merge(m, src) +} +func (m *Shoot) XXX_Size() int { + return m.Size() +} +func (m *Shoot) XXX_DiscardUnknown() { + xxx_messageInfo_Shoot.DiscardUnknown(m) +} + +var xxx_messageInfo_Shoot proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Gardenlet)(nil), "github.com.gardener.gardener.pkg.apis.seedmanagement.v1alpha1.Gardenlet") + proto.RegisterType((*GardenletDeployment)(nil), "github.com.gardener.gardener.pkg.apis.seedmanagement.v1alpha1.GardenletDeployment") + proto.RegisterMapType((map[string]string)(nil), "github.com.gardener.gardener.pkg.apis.seedmanagement.v1alpha1.GardenletDeployment.PodAnnotationsEntry") + proto.RegisterMapType((map[string]string)(nil), 
"github.com.gardener.gardener.pkg.apis.seedmanagement.v1alpha1.GardenletDeployment.PodLabelsEntry") + proto.RegisterType((*Image)(nil), "github.com.gardener.gardener.pkg.apis.seedmanagement.v1alpha1.Image") + proto.RegisterType((*ManagedSeed)(nil), "github.com.gardener.gardener.pkg.apis.seedmanagement.v1alpha1.ManagedSeed") + proto.RegisterType((*ManagedSeedList)(nil), "github.com.gardener.gardener.pkg.apis.seedmanagement.v1alpha1.ManagedSeedList") + proto.RegisterType((*ManagedSeedSpec)(nil), "github.com.gardener.gardener.pkg.apis.seedmanagement.v1alpha1.ManagedSeedSpec") + proto.RegisterType((*ManagedSeedStatus)(nil), "github.com.gardener.gardener.pkg.apis.seedmanagement.v1alpha1.ManagedSeedStatus") + proto.RegisterType((*Shoot)(nil), "github.com.gardener.gardener.pkg.apis.seedmanagement.v1alpha1.Shoot") +} + +func init() { + proto.RegisterFile("github.com/gardener/gardener/pkg/apis/seedmanagement/v1alpha1/generated.proto", fileDescriptor_d64c05a219673fe5) +} + +var fileDescriptor_d64c05a219673fe5 = []byte{ + // 1192 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0x4f, 0x6f, 0xdb, 0x36, + 0x14, 0x8f, 0xec, 0x38, 0x8d, 0xe8, 0x34, 0x7f, 0x98, 0x20, 0x50, 0x0d, 0xcc, 0x0e, 0x7c, 0xca, + 0x0e, 0x95, 0x97, 0xa0, 0x18, 0xb2, 0x61, 0x01, 0x66, 0xa5, 0x59, 0xff, 0x20, 0x69, 0x3c, 0xa6, + 0xcb, 0x80, 0x61, 0x87, 0xd1, 0x12, 0x23, 0x73, 0x91, 0x44, 0x55, 0xa4, 0xdd, 0xfa, 0x36, 0xec, + 0x13, 0x0c, 0xfd, 0x0a, 0x03, 0xf6, 0x59, 0x72, 0x2c, 0x76, 0x2a, 0xb0, 0xc1, 0x68, 0xb4, 0xe3, + 0xbe, 0x41, 0x4f, 0x03, 0x29, 0xd9, 0x92, 0x63, 0x07, 0xcb, 0x96, 0xf4, 0xc6, 0xf7, 0xf8, 0xde, + 0xef, 0xf7, 0xde, 0xe3, 0xe3, 0x23, 0xc1, 0xa1, 0x4b, 0x45, 0xa7, 0xdb, 0x36, 0x6d, 0xe6, 0x37, + 0x5c, 0x1c, 0x39, 0x24, 0x20, 0x51, 0xb6, 0x08, 0xcf, 0xdc, 0x06, 0x0e, 0x29, 0x6f, 0x70, 0x42, + 0x1c, 0x1f, 0x07, 0xd8, 0x25, 0x3e, 0x09, 0x44, 0xa3, 0xb7, 0x85, 0xbd, 0xb0, 0x83, 0xb7, 0x1a, + 0xae, 0x34, 0xc3, 0x82, 0x38, 0x66, 0x18, 0x31, 0xc1, 0xe0, 0x6e, 0x06, 0x67, 0x0e, 0x51, 0xb2, + 0x45, 0x78, 0xe6, 0x9a, 0x12, 0xce, 0x1c, 0x87, 0x33, 0x87, 0x70, 0x15, 0xeb, 0x7a, 0xd1, 0xd8, + 0x2c, 0x22, 0x8d, 0xde, 0x56, 0x9b, 0x88, 0xc9, 0x10, 0x2a, 0xf7, 0xf3, 0x18, 0xcc, 0x65, 0x0d, + 0xa5, 0x6e, 0x77, 0x4f, 0x95, 0xa4, 0x04, 0xb5, 0x4a, 0xcd, 0xeb, 0x67, 0x3b, 0xdc, 0xa4, 0x4c, + 0x02, 0x0f, 0x71, 0x27, 0x20, 0x1f, 0x64, 0x36, 0x3e, 0xb6, 0x3b, 0x34, 0x20, 0x51, 0x3f, 0x8b, + 0xc6, 0x27, 0x02, 0x4f, 0xf3, 0x6a, 0x5c, 0xe5, 0x15, 0x75, 0x03, 0x41, 0x7d, 0x32, 0xe1, 0xf0, + 0xe9, 0xbf, 0x39, 0x70, 0xbb, 0x43, 0x7c, 0x7c, 0xd9, 0xaf, 0xfe, 0x67, 0x01, 0xe8, 0x8f, 0x54, + 0x91, 0x3c, 0x22, 0xe0, 0xcf, 0x1a, 0x00, 0x0e, 0x09, 0x3d, 0xd6, 0x97, 0xb5, 0x35, 0xb4, 0x0d, + 0x6d, 0xb3, 0xbc, 0x8d, 0xcc, 0x1b, 0x1d, 0x8c, 0x39, 0x82, 0x7f, 0x38, 0x42, 0xb6, 0x16, 0xe3, + 0x41, 0x0d, 0x64, 0x32, 0xca, 0xb1, 0xc2, 0xaf, 0xc1, 0x9c, 0xcd, 0x82, 0x53, 0xea, 0x1a, 0x05, + 0xc5, 0x7f, 0xdf, 0x4c, 0x72, 0x33, 0xf3, 0xb9, 0x29, 0xda, 0x34, 0x37, 0x13, 0xe1, 0x97, 0xfb, + 0xaf, 0x04, 0x09, 0x38, 0x65, 0x81, 0x05, 0xe2, 0x41, 0x6d, 0x6e, 0x4f, 0x01, 0xa0, 0x14, 0x08, + 0xee, 0x00, 0xbd, 0xcd, 0x98, 0xe0, 0x22, 0xc2, 0xa1, 0x51, 0xdc, 0xd0, 0x36, 0x75, 0xab, 0x12, + 0x0f, 0x6a, 0xba, 0x35, 0x54, 0xbe, 0xcf, 0x0b, 0x28, 0x33, 0x86, 0xbb, 0x60, 0xc9, 0x27, 0x91, + 0x4b, 0xbe, 0xa5, 0xa2, 0xd3, 0xc2, 0x91, 0xac, 0xca, 0xec, 0x86, 0xb6, 0x39, 0x6f, 0xad, 0xc6, + 0x83, 0xda, 0xd2, 0xe1, 0xf8, 0x16, 0xba, 0x6c, 0x5b, 0x7f, 0xad, 0x83, 0xd5, 0x29, 0xf9, 0xc3, + 0x07, 0x60, 0x21, 0x22, 
0xa1, 0x47, 0x6d, 0xbc, 0xc7, 0xba, 0x69, 0xa5, 0x4b, 0xd6, 0x72, 0x3c, + 0xa8, 0x2d, 0xa0, 0x9c, 0x1e, 0x8d, 0x59, 0xc1, 0x03, 0xb0, 0x16, 0x91, 0x1e, 0x95, 0x69, 0x3e, + 0xa6, 0x5c, 0xb0, 0xa8, 0x7f, 0x40, 0x7d, 0x2a, 0x54, 0x9d, 0x4a, 0x96, 0x11, 0x0f, 0x6a, 0x6b, + 0x68, 0xca, 0x3e, 0x9a, 0xea, 0x05, 0xbf, 0x02, 0x90, 0x93, 0xa8, 0x47, 0x6d, 0xd2, 0xb4, 0x6d, + 0x89, 0xff, 0x0c, 0xfb, 0x24, 0xad, 0xce, 0x7a, 0x3c, 0xa8, 0xc1, 0xe3, 0x89, 0x5d, 0x34, 0xc5, + 0x03, 0x12, 0x50, 0xa2, 0x3e, 0x76, 0x89, 0x2a, 0x4c, 0x79, 0xfb, 0xe1, 0x0d, 0xdb, 0xe5, 0x89, + 0xc4, 0xb2, 0xf4, 0x78, 0x50, 0x2b, 0xa9, 0x25, 0x4a, 0xd0, 0xe1, 0x37, 0x40, 0x8f, 0x08, 0x67, + 0xdd, 0xc8, 0x26, 0xdc, 0x28, 0x29, 0xaa, 0xcd, 0x5c, 0x67, 0x98, 0xf2, 0x02, 0x9a, 0xbd, 0x2d, + 0x13, 0xa5, 0x46, 0x88, 0xbc, 0xe8, 0xd2, 0x48, 0x81, 0x73, 0xeb, 0xae, 0x3c, 0xed, 0xe1, 0x0e, + 0x47, 0x19, 0x12, 0x7c, 0xad, 0x01, 0x3d, 0x64, 0xce, 0x01, 0x6e, 0x13, 0x8f, 0x1b, 0x73, 0x1b, + 0xc5, 0xcd, 0xf2, 0x36, 0xbe, 0xfd, 0x8e, 0x37, 0x5b, 0x43, 0x8e, 0xfd, 0x40, 0x44, 0x7d, 0x6b, + 0xe5, 0x7c, 0x50, 0x9b, 0x91, 0x41, 0x8d, 0xf4, 0x28, 0x0b, 0x03, 0xfe, 0xa6, 0x81, 0xc5, 0x90, + 0x39, 0xcd, 0x20, 0x60, 0x02, 0x0b, 0xca, 0x02, 0x6e, 0xdc, 0x51, 0x91, 0x9d, 0x7e, 0x98, 0xc8, + 0x72, 0x44, 0x49, 0x78, 0xeb, 0x69, 0x78, 0x8b, 0xe3, 0x9b, 0xe8, 0x52, 0x54, 0xd0, 0x06, 0x2b, + 0xd8, 0x71, 0xa8, 0x14, 0xb0, 0x77, 0xc2, 0xbc, 0xae, 0x4f, 0xb8, 0x31, 0xaf, 0x42, 0xad, 0x4c, + 0x3b, 0x9c, 0xc4, 0xc4, 0xba, 0x97, 0xc2, 0xaf, 0x34, 0x2f, 0x3b, 0xa3, 0x49, 0x3c, 0xf8, 0x12, + 0xac, 0x5f, 0x56, 0x1e, 0xca, 0xee, 0xe3, 0x86, 0xae, 0x98, 0x6a, 0x57, 0x33, 0x29, 0x3b, 0xab, + 0x9a, 0xd2, 0xad, 0x37, 0xa7, 0xc2, 0xa0, 0x2b, 0xe0, 0xe1, 0x67, 0xa0, 0x48, 0x82, 0x9e, 0x01, + 0xae, 0xce, 0x67, 0x3f, 0xe8, 0x9d, 0xe0, 0xc8, 0x2a, 0xa7, 0x04, 0xc5, 0xfd, 0xa0, 0x87, 0xa4, + 0x0f, 0xbc, 0x07, 0x8a, 0xbd, 0x10, 0x1b, 0x65, 0x35, 0x2b, 0xee, 0xc8, 0xad, 0x93, 0x56, 0x13, + 0x49, 0x5d, 0xe5, 0x0b, 0xb0, 0x38, 0xde, 0x0c, 0x70, 0x19, 0x14, 0xcf, 0x48, 0x5f, 0x0d, 0x01, + 0x1d, 0xc9, 0x25, 0x5c, 0x03, 0xa5, 0x1e, 0xf6, 0xba, 0x44, 0x5d, 0x6d, 0x1d, 0x25, 0xc2, 0xe7, + 0x85, 0x1d, 0xad, 0xd2, 0x04, 0xab, 0x53, 0x0e, 0xec, 0xbf, 0x40, 0xd4, 0x7f, 0xd5, 0x40, 0x72, + 0xb5, 0xa0, 0x09, 0x40, 0x44, 0x42, 0xc6, 0xa9, 0x9c, 0x0a, 0x89, 0x73, 0x32, 0x9a, 0xd1, 0x48, + 0x8b, 0x72, 0x16, 0x32, 0x2b, 0x81, 0x93, 0xb9, 0xac, 0x27, 0x59, 0x3d, 0xc7, 0x2e, 0x92, 0x3a, + 0x78, 0x04, 0x40, 0xd8, 0xf5, 0xbc, 0x16, 0xf3, 0xa8, 0xdd, 0x4f, 0xa7, 0x48, 0x43, 0x42, 0xb5, + 0x46, 0xda, 0xf7, 0x83, 0xda, 0x47, 0x93, 0x2f, 0xa6, 0x99, 0x19, 0xa0, 0x1c, 0x44, 0xfd, 0x8f, + 0x02, 0x28, 0x1f, 0xaa, 0x16, 0x76, 0x8e, 0x09, 0x71, 0xe0, 0x0f, 0x60, 0x5e, 0xbe, 0x96, 0x0e, + 0x16, 0x38, 0x7d, 0x98, 0x3e, 0xb9, 0xf2, 0x61, 0x50, 0x77, 0x40, 0x5a, 0xcb, 0x33, 0x3a, 0x6a, + 0xff, 0x48, 0x6c, 0x71, 0x48, 0x04, 0xb6, 0x60, 0x7a, 0x4e, 0x20, 0xd3, 0xa1, 0x11, 0x2a, 0x0c, + 0xc1, 0x2c, 0x0f, 0x89, 0x9d, 0x3e, 0x3b, 0xcf, 0x6e, 0x78, 0xd5, 0x72, 0xb1, 0x1f, 0x87, 0xc4, + 0xb6, 0x16, 0x52, 0xee, 0x59, 0x29, 0x21, 0xc5, 0x04, 0x5f, 0x81, 0x39, 0x2e, 0xb0, 0xe8, 0x72, + 0x55, 0xb0, 0xf2, 0x76, 0xeb, 0x16, 0x39, 0x15, 0xae, 0xb5, 0x98, 0xb2, 0xce, 0x25, 0x32, 0x4a, + 0xf9, 0xea, 0xef, 0x34, 0xb0, 0x94, 0xb3, 0x3e, 0xa0, 0x5c, 0xc0, 0xef, 0x27, 0x2a, 0x6c, 0x5e, + 0xaf, 0xc2, 0xd2, 0x5b, 0xd5, 0x77, 0x39, 0x65, 0x9b, 0x1f, 0x6a, 0x72, 0xd5, 0x65, 0xa0, 0x44, + 0x05, 0xf1, 0xb9, 0x51, 0x50, 0xd7, 0xe9, 0xe9, 0xed, 0xa5, 0x6a, 0xdd, 0x4d, 0x69, 0x4b, 0x4f, + 0x24, 0x01, 0x4a, 0x78, 0xea, 0x7f, 0x17, 0xc6, 
0x52, 0x94, 0x65, 0x87, 0x14, 0x94, 0x78, 0x87, + 0xb1, 0xe1, 0xd7, 0xe6, 0xa6, 0x6f, 0xd5, 0xb1, 0xc4, 0xca, 0xe8, 0x95, 0x88, 0x12, 0x06, 0xd8, + 0x03, 0x0b, 0xd2, 0xed, 0x39, 0xf1, 0x43, 0x0f, 0x0b, 0x92, 0x76, 0xd5, 0x97, 0xd7, 0x64, 0x4c, + 0x07, 0x8c, 0xfa, 0xa6, 0x9a, 0xc7, 0x39, 0x9c, 0xe4, 0x93, 0x90, 0xd7, 0xa0, 0x31, 0x1e, 0xd8, + 0x05, 0xba, 0x3b, 0x9c, 0xf2, 0x69, 0x5b, 0x3d, 0xbe, 0xad, 0x57, 0x23, 0x79, 0x47, 0x47, 0x22, + 0xca, 0x98, 0xea, 0xbf, 0x6b, 0x60, 0x65, 0xa2, 0xfd, 0xe0, 0x0b, 0x00, 0x6c, 0x16, 0x24, 0xc3, + 0x95, 0x1b, 0x9a, 0x3a, 0xf9, 0xdd, 0xff, 0x53, 0x82, 0xbd, 0x21, 0x4a, 0x76, 0x87, 0x47, 0x2a, + 0x8e, 0x72, 0x24, 0xf0, 0x29, 0x80, 0xac, 0x2d, 0xbf, 0x29, 0xc4, 0x79, 0x94, 0x7c, 0x76, 0x29, + 0x0b, 0x54, 0xf5, 0x8b, 0x56, 0x25, 0xf5, 0x85, 0x47, 0x13, 0x16, 0x68, 0x8a, 0x57, 0xfd, 0x63, + 0x90, 0x9c, 0x29, 0xdc, 0x00, 0xb3, 0x81, 0xfc, 0x1d, 0x25, 0x23, 0x72, 0x74, 0x95, 0xd5, 0x9f, + 0x48, 0xed, 0x58, 0xe6, 0xf9, 0x45, 0x75, 0xe6, 0xcd, 0x45, 0x75, 0xe6, 0xed, 0x45, 0x75, 0xe6, + 0xa7, 0xb8, 0xaa, 0x9d, 0xc7, 0x55, 0xed, 0x4d, 0x5c, 0xd5, 0xde, 0xc6, 0x55, 0xed, 0x5d, 0x5c, + 0xd5, 0x7e, 0xf9, 0xab, 0x3a, 0xf3, 0xdd, 0xfc, 0xb0, 0xa6, 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, + 0x82, 0x26, 0x17, 0x3f, 0x3d, 0x0d, 0x00, 0x00, +} + +func (m *Gardenlet) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Gardenlet) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Gardenlet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.MergeWithParent != nil { + i-- + if *m.MergeWithParent { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.Bootstrap != nil { + i -= len(*m.Bootstrap) + copy(dAtA[i:], *m.Bootstrap) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Bootstrap))) + i-- + dAtA[i] = 0x1a + } + if m.Config != nil { + { + size, err := m.Config.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Deployment != nil { + { + size, err := m.Deployment.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GardenletDeployment) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GardenletDeployment) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GardenletDeployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.VPA != nil { + i-- + if *m.VPA { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x58 + } + if len(m.Env) > 0 { + for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Env[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + } + if len(m.AdditionalVolumeMounts) > 0 { + for iNdEx := len(m.AdditionalVolumeMounts) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := 
m.AdditionalVolumeMounts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + } + if len(m.AdditionalVolumes) > 0 { + for iNdEx := len(m.AdditionalVolumes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AdditionalVolumes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + } + if len(m.PodAnnotations) > 0 { + keysForPodAnnotations := make([]string, 0, len(m.PodAnnotations)) + for k := range m.PodAnnotations { + keysForPodAnnotations = append(keysForPodAnnotations, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForPodAnnotations) + for iNdEx := len(keysForPodAnnotations) - 1; iNdEx >= 0; iNdEx-- { + v := m.PodAnnotations[string(keysForPodAnnotations[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForPodAnnotations[iNdEx]) + copy(dAtA[i:], keysForPodAnnotations[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForPodAnnotations[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x3a + } + } + if len(m.PodLabels) > 0 { + keysForPodLabels := make([]string, 0, len(m.PodLabels)) + for k := range m.PodLabels { + keysForPodLabels = append(keysForPodLabels, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForPodLabels) + for iNdEx := len(keysForPodLabels) - 1; iNdEx >= 0; iNdEx-- { + v := m.PodLabels[string(keysForPodLabels[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForPodLabels[iNdEx]) + copy(dAtA[i:], keysForPodLabels[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForPodLabels[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x32 + } + } + if m.Resources != nil { + { + size, err := m.Resources.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.Image != nil { + { + size, err := m.Image.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.ServiceAccountName != nil { + i -= len(*m.ServiceAccountName) + copy(dAtA[i:], *m.ServiceAccountName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ServiceAccountName))) + i-- + dAtA[i] = 0x1a + } + if m.RevisionHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + i-- + dAtA[i] = 0x10 + } + if m.ReplicaCount != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ReplicaCount)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Image) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Image) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Image) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.PullPolicy != nil { + i -= len(*m.PullPolicy) + copy(dAtA[i:], *m.PullPolicy) + i = encodeVarintGenerated(dAtA, 
i, uint64(len(*m.PullPolicy))) + i-- + dAtA[i] = 0x1a + } + if m.Tag != nil { + i -= len(*m.Tag) + copy(dAtA[i:], *m.Tag) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Tag))) + i-- + dAtA[i] = 0x12 + } + if m.Repository != nil { + i -= len(*m.Repository) + copy(dAtA[i:], *m.Repository) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Repository))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ManagedSeed) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ManagedSeed) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ManagedSeed) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ManagedSeedList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ManagedSeedList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ManagedSeedList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ManagedSeedSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ManagedSeedSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ManagedSeedSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Gardenlet != nil { + { + size, err := m.Gardenlet.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.SeedTemplate != nil { + { + size, err := m.SeedTemplate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + { + size, err := m.Shoot.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } 
+ i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ManagedSeedStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ManagedSeedStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ManagedSeedStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x10 + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Shoot) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Shoot) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Shoot) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Gardenlet) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Deployment != nil { + l = m.Deployment.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Config != nil { + l = m.Config.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Bootstrap != nil { + l = len(*m.Bootstrap) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.MergeWithParent != nil { + n += 2 + } + return n +} + +func (m *GardenletDeployment) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ReplicaCount != nil { + n += 1 + sovGenerated(uint64(*m.ReplicaCount)) + } + if m.RevisionHistoryLimit != nil { + n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) + } + if m.ServiceAccountName != nil { + l = len(*m.ServiceAccountName) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Image != nil { + l = m.Image.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Resources != nil { + l = m.Resources.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.PodLabels) > 0 { + for k, v := range m.PodLabels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.PodAnnotations) > 0 { + for k, v := range m.PodAnnotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.AdditionalVolumes) > 0 { + for _, e := range m.AdditionalVolumes { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.AdditionalVolumeMounts) > 0 { + for _, e := range m.AdditionalVolumeMounts { + l = 
e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Env) > 0 { + for _, e := range m.Env { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.VPA != nil { + n += 2 + } + return n +} + +func (m *Image) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Repository != nil { + l = len(*m.Repository) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Tag != nil { + l = len(*m.Tag) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.PullPolicy != nil { + l = len(*m.PullPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ManagedSeed) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ManagedSeedList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ManagedSeedSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Shoot.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.SeedTemplate != nil { + l = m.SeedTemplate.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Gardenlet != nil { + l = m.Gardenlet.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ManagedSeedStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + return n +} + +func (m *Shoot) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Gardenlet) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Gardenlet{`, + `Deployment:` + strings.Replace(this.Deployment.String(), "GardenletDeployment", "GardenletDeployment", 1) + `,`, + `Config:` + strings.Replace(fmt.Sprintf("%v", this.Config), "RawExtension", "runtime.RawExtension", 1) + `,`, + `Bootstrap:` + valueToStringGenerated(this.Bootstrap) + `,`, + `MergeWithParent:` + valueToStringGenerated(this.MergeWithParent) + `,`, + `}`, + }, "") + return s +} +func (this *GardenletDeployment) String() string { + if this == nil { + return "nil" + } + repeatedStringForAdditionalVolumes := "[]Volume{" + for _, f := range this.AdditionalVolumes { + repeatedStringForAdditionalVolumes += fmt.Sprintf("%v", f) + "," + } + repeatedStringForAdditionalVolumes += "}" + repeatedStringForAdditionalVolumeMounts := "[]VolumeMount{" + for _, f := range this.AdditionalVolumeMounts { + repeatedStringForAdditionalVolumeMounts += fmt.Sprintf("%v", f) + "," + } + repeatedStringForAdditionalVolumeMounts += "}" + repeatedStringForEnv := "[]EnvVar{" + for _, f := range this.Env { + repeatedStringForEnv += fmt.Sprintf("%v", f) + "," + } + repeatedStringForEnv += "}" + keysForPodLabels := make([]string, 0, len(this.PodLabels)) + for k := range this.PodLabels { + keysForPodLabels = append(keysForPodLabels, k) + } + 
github_com_gogo_protobuf_sortkeys.Strings(keysForPodLabels) + mapStringForPodLabels := "map[string]string{" + for _, k := range keysForPodLabels { + mapStringForPodLabels += fmt.Sprintf("%v: %v,", k, this.PodLabels[k]) + } + mapStringForPodLabels += "}" + keysForPodAnnotations := make([]string, 0, len(this.PodAnnotations)) + for k := range this.PodAnnotations { + keysForPodAnnotations = append(keysForPodAnnotations, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForPodAnnotations) + mapStringForPodAnnotations := "map[string]string{" + for _, k := range keysForPodAnnotations { + mapStringForPodAnnotations += fmt.Sprintf("%v: %v,", k, this.PodAnnotations[k]) + } + mapStringForPodAnnotations += "}" + s := strings.Join([]string{`&GardenletDeployment{`, + `ReplicaCount:` + valueToStringGenerated(this.ReplicaCount) + `,`, + `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, + `ServiceAccountName:` + valueToStringGenerated(this.ServiceAccountName) + `,`, + `Image:` + strings.Replace(this.Image.String(), "Image", "Image", 1) + `,`, + `Resources:` + strings.Replace(fmt.Sprintf("%v", this.Resources), "ResourceRequirements", "v1.ResourceRequirements", 1) + `,`, + `PodLabels:` + mapStringForPodLabels + `,`, + `PodAnnotations:` + mapStringForPodAnnotations + `,`, + `AdditionalVolumes:` + repeatedStringForAdditionalVolumes + `,`, + `AdditionalVolumeMounts:` + repeatedStringForAdditionalVolumeMounts + `,`, + `Env:` + repeatedStringForEnv + `,`, + `VPA:` + valueToStringGenerated(this.VPA) + `,`, + `}`, + }, "") + return s +} +func (this *Image) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Image{`, + `Repository:` + valueToStringGenerated(this.Repository) + `,`, + `Tag:` + valueToStringGenerated(this.Tag) + `,`, + `PullPolicy:` + valueToStringGenerated(this.PullPolicy) + `,`, + `}`, + }, "") + return s +} +func (this *ManagedSeed) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ManagedSeed{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ManagedSeedSpec", "ManagedSeedSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ManagedSeedStatus", "ManagedSeedStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ManagedSeedList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ManagedSeed{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ManagedSeed", "ManagedSeed", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ManagedSeedList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ManagedSeedSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ManagedSeedSpec{`, + `Shoot:` + strings.Replace(strings.Replace(this.Shoot.String(), "Shoot", "Shoot", 1), `&`, ``, 1) + `,`, + `SeedTemplate:` + strings.Replace(fmt.Sprintf("%v", this.SeedTemplate), "SeedTemplate", "v1beta1.SeedTemplate", 1) + `,`, + `Gardenlet:` + strings.Replace(this.Gardenlet.String(), "Gardenlet", "Gardenlet", 1) + `,`, + `}`, + }, "") + return s +} +func (this 
*ManagedSeedStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]Condition{" + for _, f := range this.Conditions { + repeatedStringForConditions += fmt.Sprintf("%v", f) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&ManagedSeedStatus{`, + `Conditions:` + repeatedStringForConditions + `,`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `}`, + }, "") + return s +} +func (this *Shoot) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Shoot{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Gardenlet) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Gardenlet: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Gardenlet: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Deployment", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Deployment == nil { + m.Deployment = &GardenletDeployment{} + } + if err := m.Deployment.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Config == nil { + m.Config = &runtime.RawExtension{} + } + if err := m.Config.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Bootstrap", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if 
postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := Bootstrap(dAtA[iNdEx:postIndex]) + m.Bootstrap = &s + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MergeWithParent", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.MergeWithParent = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GardenletDeployment) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GardenletDeployment: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GardenletDeployment: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReplicaCount", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReplicaCount = &v + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.RevisionHistoryLimit = &v + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccountName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.ServiceAccountName = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if 
b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Image == nil { + m.Image = &Image{} + } + if err := m.Image.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Resources == nil { + m.Resources = &v1.ResourceRequirements{} + } + if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodLabels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PodLabels == nil { + m.PodLabels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = 
postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.PodLabels[mapkey] = mapvalue + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodAnnotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PodAnnotations == nil { + m.PodAnnotations = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.PodAnnotations[mapkey] = mapvalue + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AdditionalVolumes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AdditionalVolumes = append(m.AdditionalVolumes, v1.Volume{}) + if err := m.AdditionalVolumes[len(m.AdditionalVolumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AdditionalVolumeMounts", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AdditionalVolumeMounts = append(m.AdditionalVolumeMounts, v1.VolumeMount{}) + if err := m.AdditionalVolumeMounts[len(m.AdditionalVolumeMounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Env = append(m.Env, v1.EnvVar{}) + if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field VPA", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.VPA = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Image) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Image: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Image: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Repository", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 
64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Repository = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tag", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Tag = &s + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PullPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := k8s_io_api_core_v1.PullPolicy(dAtA[iNdEx:postIndex]) + m.PullPolicy = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ManagedSeed) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ManagedSeed: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ManagedSeed: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + 
return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ManagedSeedList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ManagedSeedList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ManagedSeedList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen 
|= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ManagedSeed{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ManagedSeedSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ManagedSeedSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ManagedSeedSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shoot", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Shoot.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SeedTemplate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SeedTemplate == nil { + m.SeedTemplate = &v1beta1.SeedTemplate{} + } + if err := m.SeedTemplate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Gardenlet", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Gardenlet == nil { + m.Gardenlet = 
&Gardenlet{} + } + if err := m.Gardenlet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ManagedSeedStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ManagedSeedStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ManagedSeedStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, v1beta1.Condition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ObservedGeneration |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Shoot) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Shoot: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Shoot: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen 
uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/gardener/gardener/pkg/apis/seedmanagement/v1alpha1/generated.proto b/vendor/github.com/gardener/gardener/pkg/apis/seedmanagement/v1alpha1/generated.proto new file mode 100644 index 000000000..d9998a8a0 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/seedmanagement/v1alpha1/generated.proto @@ -0,0 +1,179 @@ +/* +Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package github.com.gardener.gardener.pkg.apis.seedmanagement.v1alpha1; + +import "github.com/gardener/gardener/pkg/apis/core/v1beta1/generated.proto"; +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1alpha1"; + +// Gardenlet specifies gardenlet deployment parameters and the GardenletConfiguration used to configure gardenlet. +message Gardenlet { + // Deployment specifies certain gardenlet deployment parameters, such as the number of replicas, + // the image, etc. + // +optional + optional GardenletDeployment deployment = 1; + + // Config is the GardenletConfiguration used to configure gardenlet. + // +optional + optional k8s.io.apimachinery.pkg.runtime.RawExtension config = 2; + + // Bootstrap is the mechanism that should be used for bootstrapping gardenlet connection to the Garden cluster. One of ServiceAccount, BootstrapToken, None. + // If set to ServiceAccount or BootstrapToken, a service account or a bootstrap token will be created in the garden cluster and used to compute the bootstrap kubeconfig. + // If set to None, the gardenClientConnection.kubeconfig field will be used to connect to the Garden cluster. Defaults to BootstrapToken. + // +optional + optional string bootstrap = 3; + + // MergeWithParent specifies whether the deployment parameters and GardenletConfiguration of the parent gardenlet + // should be merged with the specified deployment parameters and GardenletConfiguration. Defaults to true. + // +optional + optional bool mergeWithParent = 4; +} + +// GardenletDeployment specifies certain gardenlet deployment parameters, such as the number of replicas, +// the image, etc. +message GardenletDeployment { + // ReplicaCount is the number of gardenlet replicas. Defaults to 1. + // +optional + optional int32 replicaCount = 1; + + // RevisionHistoryLimit is the number of old gardenlet ReplicaSets to retain to allow rollback. Defaults to 1. + // +optional + optional int32 revisionHistoryLimit = 2; + + // ServiceAccountName is the name of the ServiceAccount to use to run gardenlet pods. + // +optional + optional string serviceAccountName = 3; + + // Image is the gardenlet container image. + // +optional + optional Image image = 4; + + // Resources are the compute resources required by the gardenlet container. + // +optional + optional k8s.io.api.core.v1.ResourceRequirements resources = 5; + + // PodLabels are the labels on gardenlet pods. + // +optional + map<string, string> podLabels = 6; + + // PodAnnotations are the annotations on gardenlet pods. + // +optional + map<string, string> podAnnotations = 7; + + // AdditionalVolumes is the list of additional volumes that should be mounted by gardenlet containers.
+ // +optional + repeated k8s.io.api.core.v1.Volume additionalVolumes = 8; + + // AdditionalVolumeMounts is the list of additional pod volumes to mount into the gardenlet container's filesystem. + // +optional + repeated k8s.io.api.core.v1.VolumeMount additionalVolumeMounts = 9; + + // Env is the list of environment variables to set in the gardenlet container. + // +optional + repeated k8s.io.api.core.v1.EnvVar env = 10; + + // VPA specifies whether to enable VPA for gardenlet. Defaults to true. + // +optional + optional bool vpa = 11; +} + +// Image specifies container image parameters. +message Image { + // Repository is the image repository. + // +optional + optional string repository = 1; + + // Tag is the image tag. + // +optional + optional string tag = 2; + + // PullPolicy is the image pull policy. One of Always, Never, IfNotPresent. + // Defaults to Always if latest tag is specified, or IfNotPresent otherwise. + // +optional + optional string pullPolicy = 3; +} + +// ManagedSeed represents a Shoot that is registered as Seed. +message ManagedSeed { + // Standard object metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the ManagedSeed. + // +optional + optional ManagedSeedSpec spec = 2; + + // Most recently observed status of the ManagedSeed. + // +optional + optional ManagedSeedStatus status = 3; +} + +// ManagedSeedList is a list of ManagedSeed objects. +message ManagedSeedList { + // Standard list object metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of ManagedSeeds. + repeated ManagedSeed items = 2; +} + +// ManagedSeedSpec is the specification of a ManagedSeed. +message ManagedSeedSpec { + // Shoot references a Shoot that should be registered as Seed. + optional Shoot shoot = 1; + + // SeedTemplate is a template for a Seed object, that should be used to register a given cluster as a Seed. + // Either SeedTemplate or Gardenlet must be specified. When Seed is specified, the ManagedSeed controller will not deploy a gardenlet into the cluster + // and an existing gardenlet reconciling the new Seed is required. + // +optional + optional github.com.gardener.gardener.pkg.apis.core.v1beta1.SeedTemplate seedTemplate = 2; + + // Gardenlet specifies that the ManagedSeed controller should deploy a gardenlet into the cluster + // with the given deployment parameters and GardenletConfiguration. + // +optional + optional Gardenlet gardenlet = 3; +} + +// ManagedSeedStatus is the status of a ManagedSeed. +message ManagedSeedStatus { + // Conditions represents the latest available observations of a ManagedSeed's current state. + // +patchMergeKey=type + // +patchStrategy=merge + // +optional + repeated github.com.gardener.gardener.pkg.apis.core.v1beta1.Condition conditions = 1; + + // ObservedGeneration is the most recent generation observed for this ManagedSeed. It corresponds to the + // ManagedSeed's generation, which is updated on mutation by the API Server. + optional int64 observedGeneration = 2; +} + +// Shoot identifies the Shoot that should be registered as Seed. +message Shoot { + // Name is the name of the Shoot that will be registered as Seed. 
+ optional string name = 1; +} + diff --git a/vendor/github.com/gardener/gardener/pkg/apis/seedmanagement/v1alpha1/register.go b/vendor/github.com/gardener/gardener/pkg/apis/seedmanagement/v1alpha1/register.go new file mode 100644 index 000000000..52947891f --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/seedmanagement/v1alpha1/register.go @@ -0,0 +1,55 @@ +// Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the name of the seedmanagement API group. +const GroupName = "seedmanagement.gardener.cloud" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind. +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource. +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder is a new Scheme Builder which registers our API. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs, addConversionFuncs) + localSchemeBuilder = &SchemeBuilder + // AddToScheme is a reference to the Scheme Builder's AddToScheme function. + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &ManagedSeed{}, + &ManagedSeedList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/seedmanagement/v1alpha1/types_managedseed.go b/vendor/github.com/gardener/gardener/pkg/apis/seedmanagement/v1alpha1/types_managedseed.go new file mode 100644 index 000000000..b339bc08a --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/seedmanagement/v1alpha1/types_managedseed.go @@ -0,0 +1,180 @@ +// Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + + gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ManagedSeed represents a Shoot that is registered as Seed. +type ManagedSeed struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Specification of the ManagedSeed. + // +optional + Spec ManagedSeedSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + // Most recently observed status of the ManagedSeed. + // +optional + Status ManagedSeedStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ManagedSeedList is a list of ManagedSeed objects. +type ManagedSeedList struct { + metav1.TypeMeta `json:",inline"` + // Standard list object metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Items is the list of ManagedSeeds. + Items []ManagedSeed `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// ManagedSeedSpec is the specification of a ManagedSeed. +type ManagedSeedSpec struct { + // Shoot references a Shoot that should be registered as Seed. + Shoot Shoot `json:"shoot" protobuf:"bytes,1,opt,name=shoot"` + // SeedTemplate is a template for a Seed object, that should be used to register a given cluster as a Seed. + // Either SeedTemplate or Gardenlet must be specified. When Seed is specified, the ManagedSeed controller will not deploy a gardenlet into the cluster + // and an existing gardenlet reconciling the new Seed is required. + // +optional + SeedTemplate *gardencorev1beta1.SeedTemplate `json:"seedTemplate,omitempty" protobuf:"bytes,2,opt,name=seedTemplate"` + // Gardenlet specifies that the ManagedSeed controller should deploy a gardenlet into the cluster + // with the given deployment parameters and GardenletConfiguration. + // +optional + Gardenlet *Gardenlet `json:"gardenlet,omitempty" protobuf:"bytes,3,opt,name=gardenlet"` +} + +// Shoot identifies the Shoot that should be registered as Seed. +type Shoot struct { + // Name is the name of the Shoot that will be registered as Seed. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` +} + +// Gardenlet specifies gardenlet deployment parameters and the GardenletConfiguration used to configure gardenlet. +type Gardenlet struct { + // Deployment specifies certain gardenlet deployment parameters, such as the number of replicas, + // the image, etc. + // +optional + Deployment *GardenletDeployment `json:"deployment,omitempty" protobuf:"bytes,1,opt,name=deployment"` + // Config is the GardenletConfiguration used to configure gardenlet. 
+ // +optional + Config *runtime.RawExtension `json:"config,omitempty" protobuf:"bytes,2,opt,name=config"` + // Bootstrap is the mechanism that should be used for bootstrapping gardenlet connection to the Garden cluster. One of ServiceAccount, BootstrapToken, None. + // If set to ServiceAccount or BootstrapToken, a service account or a bootstrap token will be created in the garden cluster and used to compute the bootstrap kubeconfig. + // If set to None, the gardenClientConnection.kubeconfig field will be used to connect to the Garden cluster. Defaults to BootstrapToken. + // +optional + Bootstrap *Bootstrap `json:"bootstrap,omitempty" protobuf:"bytes,3,opt,name=bootstrap"` + // MergeWithParent specifies whether the deployment parameters and GardenletConfiguration of the parent gardenlet + // should be merged with the specified deployment parameters and GardenletConfiguration. Defaults to true. + // +optional + MergeWithParent *bool `json:"mergeWithParent,omitempty" protobuf:"varint,4,opt,name=mergeWithParent"` +} + +// GardenletDeployment specifies certain gardenlet deployment parameters, such as the number of replicas, +// the image, etc. +type GardenletDeployment struct { + // ReplicaCount is the number of gardenlet replicas. Defaults to 1. + // +optional + ReplicaCount *int32 `json:"replicaCount,omitempty" protobuf:"varint,1,opt,name=replicaCount"` + // RevisionHistoryLimit is the number of old gardenlet ReplicaSets to retain to allow rollback. Defaults to 1. + // +optional + RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,2,opt,name=revisionHistoryLimit"` + // ServiceAccountName is the name of the ServiceAccount to use to run gardenlet pods. + // +optional + ServiceAccountName *string `json:"serviceAccountName,omitempty" protobuf:"bytes,3,opt,name=serviceAccountName"` + // Image is the gardenlet container image. + // +optional + Image *Image `json:"image,omitempty" protobuf:"bytes,4,opt,name=image"` + // Resources are the compute resources required by the gardenlet container. + // +optional + Resources *corev1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,5,opt,name=resources"` + // PodLabels are the labels on gardenlet pods. + // +optional + PodLabels map[string]string `json:"podLabels,omitempty" protobuf:"bytes,6,opt,name=podLabels"` + // PodAnnotations are the annotations on gardenlet pods. + // +optional + PodAnnotations map[string]string `json:"podAnnotations,omitempty" protobuf:"bytes,7,opt,name=podAnnotations"` + // AdditionalVolumes is the list of additional volumes that should be mounted by gardenlet containers. + // +optional + AdditionalVolumes []corev1.Volume `json:"additionalVolumes,omitempty" protobuf:"bytes,8,rep,name=additionalVolumes"` + // AdditionalVolumeMounts is the list of additional pod volumes to mount into the gardenlet container's filesystem. + // +optional + AdditionalVolumeMounts []corev1.VolumeMount `json:"additionalVolumeMounts,omitempty" protobuf:"bytes,9,rep,name=additionalVolumeMounts"` + // Env is the list of environment variables to set in the gardenlet container. + // +optional + Env []corev1.EnvVar `json:"env,omitempty" protobuf:"bytes,10,rep,name=env"` + // VPA specifies whether to enable VPA for gardenlet. Defaults to true. + // +optional + VPA *bool `json:"vpa,omitempty" protobuf:"bytes,11,rep,name=vpa"` +} + +// Image specifies container image parameters. +type Image struct { + // Repository is the image repository. 
+ // +optional + Repository *string `json:"repository,omitempty" protobuf:"bytes,1,opt,name=repository"` + // Tag is the image tag. + // +optional + Tag *string `json:"tag,omitempty" protobuf:"bytes,2,opt,name=tag"` + // PullPolicy is the image pull policy. One of Always, Never, IfNotPresent. + // Defaults to Always if latest tag is specified, or IfNotPresent otherwise. + // +optional + PullPolicy *corev1.PullPolicy `json:"pullPolicy,omitempty" protobuf:"bytes,3,opt,name=pullPolicy"` +} + +// Bootstrap describes a mechanism for bootstrapping gardenlet connection to the Garden cluster. +type Bootstrap string + +const ( + // BootstrapServiceAccount means that a temporary service account should be used for bootstrapping gardenlet connection to the Garden cluster. + BootstrapServiceAccount Bootstrap = "ServiceAccount" + // BootstrapToken means that a bootstrap token should be used for bootstrapping gardenlet connection to the Garden cluster. + BootstrapToken Bootstrap = "BootstrapToken" + // BootstrapNone means that gardenlet connection to the Garden cluster should not be bootstrapped + // and the gardenClientConnection.kubeconfig field should be used to connect to the Garden cluster. + BootstrapNone Bootstrap = "None" +) + +// ManagedSeedStatus is the status of a ManagedSeed. +type ManagedSeedStatus struct { + // Conditions represents the latest available observations of a ManagedSeed's current state. + // +patchMergeKey=type + // +patchStrategy=merge + // +optional + Conditions []gardencorev1beta1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` + // ObservedGeneration is the most recent generation observed for this ManagedSeed. It corresponds to the + // ManagedSeed's generation, which is updated on mutation by the API Server. + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,2,opt,name=observedGeneration"` +} + +const ( + // ManagedSeedShootExists is a condition type for indicating whether the ManagedSeed's shoot exists. + ManagedSeedShootExists gardencorev1beta1.ConditionType = "ShootExists" + // ManagedSeedShootReconciled is a condition type for indicating whether the ManagedSeed's shoot has been reconciled. + ManagedSeedShootReconciled gardencorev1beta1.ConditionType = "ShootReconciled" + // ManagedSeedSeedRegistered is a condition type for indicating whether the ManagedSeed's seed has been registered, + // either directly or by deploying gardenlet into the shoot. + ManagedSeedSeedRegistered gardencorev1beta1.ConditionType = "SeedRegistered" +) diff --git a/vendor/github.com/gardener/gardener/pkg/apis/seedmanagement/v1alpha1/zz_generated.conversion.go b/vendor/github.com/gardener/gardener/pkg/apis/seedmanagement/v1alpha1/zz_generated.conversion.go new file mode 100644 index 000000000..a06bc2751 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/seedmanagement/v1alpha1/zz_generated.conversion.go @@ -0,0 +1,380 @@ +// +build !ignore_autogenerated + +/* +Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by conversion-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + unsafe "unsafe" + + core "github.com/gardener/gardener/pkg/apis/core" + v1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1" + seedmanagement "github.com/gardener/gardener/pkg/apis/seedmanagement" + v1 "k8s.io/api/core/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. +func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*GardenletDeployment)(nil), (*seedmanagement.GardenletDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_GardenletDeployment_To_seedmanagement_GardenletDeployment(a.(*GardenletDeployment), b.(*seedmanagement.GardenletDeployment), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*seedmanagement.GardenletDeployment)(nil), (*GardenletDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_seedmanagement_GardenletDeployment_To_v1alpha1_GardenletDeployment(a.(*seedmanagement.GardenletDeployment), b.(*GardenletDeployment), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*Image)(nil), (*seedmanagement.Image)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_Image_To_seedmanagement_Image(a.(*Image), b.(*seedmanagement.Image), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*seedmanagement.Image)(nil), (*Image)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_seedmanagement_Image_To_v1alpha1_Image(a.(*seedmanagement.Image), b.(*Image), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ManagedSeed)(nil), (*seedmanagement.ManagedSeed)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_ManagedSeed_To_seedmanagement_ManagedSeed(a.(*ManagedSeed), b.(*seedmanagement.ManagedSeed), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*seedmanagement.ManagedSeed)(nil), (*ManagedSeed)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_seedmanagement_ManagedSeed_To_v1alpha1_ManagedSeed(a.(*seedmanagement.ManagedSeed), b.(*ManagedSeed), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ManagedSeedList)(nil), (*seedmanagement.ManagedSeedList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_ManagedSeedList_To_seedmanagement_ManagedSeedList(a.(*ManagedSeedList), b.(*seedmanagement.ManagedSeedList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*seedmanagement.ManagedSeedList)(nil), (*ManagedSeedList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_seedmanagement_ManagedSeedList_To_v1alpha1_ManagedSeedList(a.(*seedmanagement.ManagedSeedList), b.(*ManagedSeedList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ManagedSeedSpec)(nil), (*seedmanagement.ManagedSeedSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_ManagedSeedSpec_To_seedmanagement_ManagedSeedSpec(a.(*ManagedSeedSpec), b.(*seedmanagement.ManagedSeedSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*seedmanagement.ManagedSeedSpec)(nil), (*ManagedSeedSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_seedmanagement_ManagedSeedSpec_To_v1alpha1_ManagedSeedSpec(a.(*seedmanagement.ManagedSeedSpec), b.(*ManagedSeedSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ManagedSeedStatus)(nil), (*seedmanagement.ManagedSeedStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_ManagedSeedStatus_To_seedmanagement_ManagedSeedStatus(a.(*ManagedSeedStatus), b.(*seedmanagement.ManagedSeedStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*seedmanagement.ManagedSeedStatus)(nil), (*ManagedSeedStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_seedmanagement_ManagedSeedStatus_To_v1alpha1_ManagedSeedStatus(a.(*seedmanagement.ManagedSeedStatus), b.(*ManagedSeedStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*Shoot)(nil), (*seedmanagement.Shoot)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_Shoot_To_seedmanagement_Shoot(a.(*Shoot), b.(*seedmanagement.Shoot), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*seedmanagement.Shoot)(nil), (*Shoot)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_seedmanagement_Shoot_To_v1alpha1_Shoot(a.(*seedmanagement.Shoot), b.(*Shoot), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*seedmanagement.Gardenlet)(nil), (*Gardenlet)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_seedmanagement_Gardenlet_To_v1alpha1_Gardenlet(a.(*seedmanagement.Gardenlet), b.(*Gardenlet), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*Gardenlet)(nil), (*seedmanagement.Gardenlet)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_Gardenlet_To_seedmanagement_Gardenlet(a.(*Gardenlet), b.(*seedmanagement.Gardenlet), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_v1alpha1_Gardenlet_To_seedmanagement_Gardenlet(in *Gardenlet, out *seedmanagement.Gardenlet, s conversion.Scope) error { + out.Deployment = (*seedmanagement.GardenletDeployment)(unsafe.Pointer(in.Deployment)) + // WARNING: in.Config requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/runtime.RawExtension vs k8s.io/apimachinery/pkg/runtime.Object) + out.Bootstrap = (*seedmanagement.Bootstrap)(unsafe.Pointer(in.Bootstrap)) + out.MergeWithParent = (*bool)(unsafe.Pointer(in.MergeWithParent)) + return nil +} + +func autoConvert_seedmanagement_Gardenlet_To_v1alpha1_Gardenlet(in *seedmanagement.Gardenlet, out *Gardenlet, s conversion.Scope) error { + out.Deployment = (*GardenletDeployment)(unsafe.Pointer(in.Deployment)) + // WARNING: in.Config requires manual conversion: inconvertible types 
(k8s.io/apimachinery/pkg/runtime.Object vs *k8s.io/apimachinery/pkg/runtime.RawExtension) + out.Bootstrap = (*Bootstrap)(unsafe.Pointer(in.Bootstrap)) + out.MergeWithParent = (*bool)(unsafe.Pointer(in.MergeWithParent)) + return nil +} + +func autoConvert_v1alpha1_GardenletDeployment_To_seedmanagement_GardenletDeployment(in *GardenletDeployment, out *seedmanagement.GardenletDeployment, s conversion.Scope) error { + out.ReplicaCount = (*int32)(unsafe.Pointer(in.ReplicaCount)) + out.RevisionHistoryLimit = (*int32)(unsafe.Pointer(in.RevisionHistoryLimit)) + out.ServiceAccountName = (*string)(unsafe.Pointer(in.ServiceAccountName)) + out.Image = (*seedmanagement.Image)(unsafe.Pointer(in.Image)) + out.Resources = (*v1.ResourceRequirements)(unsafe.Pointer(in.Resources)) + out.PodLabels = *(*map[string]string)(unsafe.Pointer(&in.PodLabels)) + out.PodAnnotations = *(*map[string]string)(unsafe.Pointer(&in.PodAnnotations)) + out.AdditionalVolumes = *(*[]v1.Volume)(unsafe.Pointer(&in.AdditionalVolumes)) + out.AdditionalVolumeMounts = *(*[]v1.VolumeMount)(unsafe.Pointer(&in.AdditionalVolumeMounts)) + out.Env = *(*[]v1.EnvVar)(unsafe.Pointer(&in.Env)) + out.VPA = (*bool)(unsafe.Pointer(in.VPA)) + return nil +} + +// Convert_v1alpha1_GardenletDeployment_To_seedmanagement_GardenletDeployment is an autogenerated conversion function. +func Convert_v1alpha1_GardenletDeployment_To_seedmanagement_GardenletDeployment(in *GardenletDeployment, out *seedmanagement.GardenletDeployment, s conversion.Scope) error { + return autoConvert_v1alpha1_GardenletDeployment_To_seedmanagement_GardenletDeployment(in, out, s) +} + +func autoConvert_seedmanagement_GardenletDeployment_To_v1alpha1_GardenletDeployment(in *seedmanagement.GardenletDeployment, out *GardenletDeployment, s conversion.Scope) error { + out.ReplicaCount = (*int32)(unsafe.Pointer(in.ReplicaCount)) + out.RevisionHistoryLimit = (*int32)(unsafe.Pointer(in.RevisionHistoryLimit)) + out.ServiceAccountName = (*string)(unsafe.Pointer(in.ServiceAccountName)) + out.Image = (*Image)(unsafe.Pointer(in.Image)) + out.Resources = (*v1.ResourceRequirements)(unsafe.Pointer(in.Resources)) + out.PodLabels = *(*map[string]string)(unsafe.Pointer(&in.PodLabels)) + out.PodAnnotations = *(*map[string]string)(unsafe.Pointer(&in.PodAnnotations)) + out.AdditionalVolumes = *(*[]v1.Volume)(unsafe.Pointer(&in.AdditionalVolumes)) + out.AdditionalVolumeMounts = *(*[]v1.VolumeMount)(unsafe.Pointer(&in.AdditionalVolumeMounts)) + out.Env = *(*[]v1.EnvVar)(unsafe.Pointer(&in.Env)) + out.VPA = (*bool)(unsafe.Pointer(in.VPA)) + return nil +} + +// Convert_seedmanagement_GardenletDeployment_To_v1alpha1_GardenletDeployment is an autogenerated conversion function. +func Convert_seedmanagement_GardenletDeployment_To_v1alpha1_GardenletDeployment(in *seedmanagement.GardenletDeployment, out *GardenletDeployment, s conversion.Scope) error { + return autoConvert_seedmanagement_GardenletDeployment_To_v1alpha1_GardenletDeployment(in, out, s) +} + +func autoConvert_v1alpha1_Image_To_seedmanagement_Image(in *Image, out *seedmanagement.Image, s conversion.Scope) error { + out.Repository = (*string)(unsafe.Pointer(in.Repository)) + out.Tag = (*string)(unsafe.Pointer(in.Tag)) + out.PullPolicy = (*v1.PullPolicy)(unsafe.Pointer(in.PullPolicy)) + return nil +} + +// Convert_v1alpha1_Image_To_seedmanagement_Image is an autogenerated conversion function. 
+func Convert_v1alpha1_Image_To_seedmanagement_Image(in *Image, out *seedmanagement.Image, s conversion.Scope) error { + return autoConvert_v1alpha1_Image_To_seedmanagement_Image(in, out, s) +} + +func autoConvert_seedmanagement_Image_To_v1alpha1_Image(in *seedmanagement.Image, out *Image, s conversion.Scope) error { + out.Repository = (*string)(unsafe.Pointer(in.Repository)) + out.Tag = (*string)(unsafe.Pointer(in.Tag)) + out.PullPolicy = (*v1.PullPolicy)(unsafe.Pointer(in.PullPolicy)) + return nil +} + +// Convert_seedmanagement_Image_To_v1alpha1_Image is an autogenerated conversion function. +func Convert_seedmanagement_Image_To_v1alpha1_Image(in *seedmanagement.Image, out *Image, s conversion.Scope) error { + return autoConvert_seedmanagement_Image_To_v1alpha1_Image(in, out, s) +} + +func autoConvert_v1alpha1_ManagedSeed_To_seedmanagement_ManagedSeed(in *ManagedSeed, out *seedmanagement.ManagedSeed, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha1_ManagedSeedSpec_To_seedmanagement_ManagedSeedSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1alpha1_ManagedSeedStatus_To_seedmanagement_ManagedSeedStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha1_ManagedSeed_To_seedmanagement_ManagedSeed is an autogenerated conversion function. +func Convert_v1alpha1_ManagedSeed_To_seedmanagement_ManagedSeed(in *ManagedSeed, out *seedmanagement.ManagedSeed, s conversion.Scope) error { + return autoConvert_v1alpha1_ManagedSeed_To_seedmanagement_ManagedSeed(in, out, s) +} + +func autoConvert_seedmanagement_ManagedSeed_To_v1alpha1_ManagedSeed(in *seedmanagement.ManagedSeed, out *ManagedSeed, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_seedmanagement_ManagedSeedSpec_To_v1alpha1_ManagedSeedSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_seedmanagement_ManagedSeedStatus_To_v1alpha1_ManagedSeedStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_seedmanagement_ManagedSeed_To_v1alpha1_ManagedSeed is an autogenerated conversion function. +func Convert_seedmanagement_ManagedSeed_To_v1alpha1_ManagedSeed(in *seedmanagement.ManagedSeed, out *ManagedSeed, s conversion.Scope) error { + return autoConvert_seedmanagement_ManagedSeed_To_v1alpha1_ManagedSeed(in, out, s) +} + +func autoConvert_v1alpha1_ManagedSeedList_To_seedmanagement_ManagedSeedList(in *ManagedSeedList, out *seedmanagement.ManagedSeedList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]seedmanagement.ManagedSeed, len(*in)) + for i := range *in { + if err := Convert_v1alpha1_ManagedSeed_To_seedmanagement_ManagedSeed(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_v1alpha1_ManagedSeedList_To_seedmanagement_ManagedSeedList is an autogenerated conversion function. 
+func Convert_v1alpha1_ManagedSeedList_To_seedmanagement_ManagedSeedList(in *ManagedSeedList, out *seedmanagement.ManagedSeedList, s conversion.Scope) error { + return autoConvert_v1alpha1_ManagedSeedList_To_seedmanagement_ManagedSeedList(in, out, s) +} + +func autoConvert_seedmanagement_ManagedSeedList_To_v1alpha1_ManagedSeedList(in *seedmanagement.ManagedSeedList, out *ManagedSeedList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ManagedSeed, len(*in)) + for i := range *in { + if err := Convert_seedmanagement_ManagedSeed_To_v1alpha1_ManagedSeed(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_seedmanagement_ManagedSeedList_To_v1alpha1_ManagedSeedList is an autogenerated conversion function. +func Convert_seedmanagement_ManagedSeedList_To_v1alpha1_ManagedSeedList(in *seedmanagement.ManagedSeedList, out *ManagedSeedList, s conversion.Scope) error { + return autoConvert_seedmanagement_ManagedSeedList_To_v1alpha1_ManagedSeedList(in, out, s) +} + +func autoConvert_v1alpha1_ManagedSeedSpec_To_seedmanagement_ManagedSeedSpec(in *ManagedSeedSpec, out *seedmanagement.ManagedSeedSpec, s conversion.Scope) error { + if err := Convert_v1alpha1_Shoot_To_seedmanagement_Shoot(&in.Shoot, &out.Shoot, s); err != nil { + return err + } + if in.SeedTemplate != nil { + in, out := &in.SeedTemplate, &out.SeedTemplate + *out = new(core.SeedTemplate) + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(*in, *out, 0); err != nil { + return err + } + } else { + out.SeedTemplate = nil + } + if in.Gardenlet != nil { + in, out := &in.Gardenlet, &out.Gardenlet + *out = new(seedmanagement.Gardenlet) + if err := Convert_v1alpha1_Gardenlet_To_seedmanagement_Gardenlet(*in, *out, s); err != nil { + return err + } + } else { + out.Gardenlet = nil + } + return nil +} + +// Convert_v1alpha1_ManagedSeedSpec_To_seedmanagement_ManagedSeedSpec is an autogenerated conversion function. +func Convert_v1alpha1_ManagedSeedSpec_To_seedmanagement_ManagedSeedSpec(in *ManagedSeedSpec, out *seedmanagement.ManagedSeedSpec, s conversion.Scope) error { + return autoConvert_v1alpha1_ManagedSeedSpec_To_seedmanagement_ManagedSeedSpec(in, out, s) +} + +func autoConvert_seedmanagement_ManagedSeedSpec_To_v1alpha1_ManagedSeedSpec(in *seedmanagement.ManagedSeedSpec, out *ManagedSeedSpec, s conversion.Scope) error { + if err := Convert_seedmanagement_Shoot_To_v1alpha1_Shoot(&in.Shoot, &out.Shoot, s); err != nil { + return err + } + if in.SeedTemplate != nil { + in, out := &in.SeedTemplate, &out.SeedTemplate + *out = new(v1beta1.SeedTemplate) + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(*in, *out, 0); err != nil { + return err + } + } else { + out.SeedTemplate = nil + } + if in.Gardenlet != nil { + in, out := &in.Gardenlet, &out.Gardenlet + *out = new(Gardenlet) + if err := Convert_seedmanagement_Gardenlet_To_v1alpha1_Gardenlet(*in, *out, s); err != nil { + return err + } + } else { + out.Gardenlet = nil + } + return nil +} + +// Convert_seedmanagement_ManagedSeedSpec_To_v1alpha1_ManagedSeedSpec is an autogenerated conversion function. 
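Illustrative sketch (not part of the generated file): the functions registered above convert between the external v1alpha1 types and the internal seedmanagement types. Calling one of the purely generated conversions directly can look as follows; the nil conversion.Scope is only acceptable because this particular generated function never uses it, and real callers normally go through a runtime.Scheme instead.

package main

import (
	"fmt"

	"github.com/gardener/gardener/pkg/apis/seedmanagement"
	seedmanagementv1alpha1 "github.com/gardener/gardener/pkg/apis/seedmanagement/v1alpha1"
)

func main() {
	replicas := int32(2)
	in := &seedmanagementv1alpha1.GardenletDeployment{ReplicaCount: &replicas}
	out := &seedmanagement.GardenletDeployment{}

	// This generated conversion only copies fields via unsafe pointer casts,
	// so the nil scope is never dereferenced here.
	if err := seedmanagementv1alpha1.Convert_v1alpha1_GardenletDeployment_To_seedmanagement_GardenletDeployment(in, out, nil); err != nil {
		panic(err)
	}

	fmt.Println(*out.ReplicaCount) // prints 2
}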
+func Convert_seedmanagement_ManagedSeedSpec_To_v1alpha1_ManagedSeedSpec(in *seedmanagement.ManagedSeedSpec, out *ManagedSeedSpec, s conversion.Scope) error { + return autoConvert_seedmanagement_ManagedSeedSpec_To_v1alpha1_ManagedSeedSpec(in, out, s) +} + +func autoConvert_v1alpha1_ManagedSeedStatus_To_seedmanagement_ManagedSeedStatus(in *ManagedSeedStatus, out *seedmanagement.ManagedSeedStatus, s conversion.Scope) error { + out.Conditions = *(*[]core.Condition)(unsafe.Pointer(&in.Conditions)) + out.ObservedGeneration = in.ObservedGeneration + return nil +} + +// Convert_v1alpha1_ManagedSeedStatus_To_seedmanagement_ManagedSeedStatus is an autogenerated conversion function. +func Convert_v1alpha1_ManagedSeedStatus_To_seedmanagement_ManagedSeedStatus(in *ManagedSeedStatus, out *seedmanagement.ManagedSeedStatus, s conversion.Scope) error { + return autoConvert_v1alpha1_ManagedSeedStatus_To_seedmanagement_ManagedSeedStatus(in, out, s) +} + +func autoConvert_seedmanagement_ManagedSeedStatus_To_v1alpha1_ManagedSeedStatus(in *seedmanagement.ManagedSeedStatus, out *ManagedSeedStatus, s conversion.Scope) error { + out.Conditions = *(*[]v1beta1.Condition)(unsafe.Pointer(&in.Conditions)) + out.ObservedGeneration = in.ObservedGeneration + return nil +} + +// Convert_seedmanagement_ManagedSeedStatus_To_v1alpha1_ManagedSeedStatus is an autogenerated conversion function. +func Convert_seedmanagement_ManagedSeedStatus_To_v1alpha1_ManagedSeedStatus(in *seedmanagement.ManagedSeedStatus, out *ManagedSeedStatus, s conversion.Scope) error { + return autoConvert_seedmanagement_ManagedSeedStatus_To_v1alpha1_ManagedSeedStatus(in, out, s) +} + +func autoConvert_v1alpha1_Shoot_To_seedmanagement_Shoot(in *Shoot, out *seedmanagement.Shoot, s conversion.Scope) error { + out.Name = in.Name + return nil +} + +// Convert_v1alpha1_Shoot_To_seedmanagement_Shoot is an autogenerated conversion function. +func Convert_v1alpha1_Shoot_To_seedmanagement_Shoot(in *Shoot, out *seedmanagement.Shoot, s conversion.Scope) error { + return autoConvert_v1alpha1_Shoot_To_seedmanagement_Shoot(in, out, s) +} + +func autoConvert_seedmanagement_Shoot_To_v1alpha1_Shoot(in *seedmanagement.Shoot, out *Shoot, s conversion.Scope) error { + out.Name = in.Name + return nil +} + +// Convert_seedmanagement_Shoot_To_v1alpha1_Shoot is an autogenerated conversion function. +func Convert_seedmanagement_Shoot_To_v1alpha1_Shoot(in *seedmanagement.Shoot, out *Shoot, s conversion.Scope) error { + return autoConvert_seedmanagement_Shoot_To_v1alpha1_Shoot(in, out, s) +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/seedmanagement/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/gardener/gardener/pkg/apis/seedmanagement/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000..853bc4240 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/seedmanagement/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,302 @@ +// +build !ignore_autogenerated + +/* +Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1" + v1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Gardenlet) DeepCopyInto(out *Gardenlet) { + *out = *in + if in.Deployment != nil { + in, out := &in.Deployment, &out.Deployment + *out = new(GardenletDeployment) + (*in).DeepCopyInto(*out) + } + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = new(runtime.RawExtension) + (*in).DeepCopyInto(*out) + } + if in.Bootstrap != nil { + in, out := &in.Bootstrap, &out.Bootstrap + *out = new(Bootstrap) + **out = **in + } + if in.MergeWithParent != nil { + in, out := &in.MergeWithParent, &out.MergeWithParent + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Gardenlet. +func (in *Gardenlet) DeepCopy() *Gardenlet { + if in == nil { + return nil + } + out := new(Gardenlet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GardenletDeployment) DeepCopyInto(out *GardenletDeployment) { + *out = *in + if in.ReplicaCount != nil { + in, out := &in.ReplicaCount, &out.ReplicaCount + *out = new(int32) + **out = **in + } + if in.RevisionHistoryLimit != nil { + in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit + *out = new(int32) + **out = **in + } + if in.ServiceAccountName != nil { + in, out := &in.ServiceAccountName, &out.ServiceAccountName + *out = new(string) + **out = **in + } + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(Image) + (*in).DeepCopyInto(*out) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(v1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.PodLabels != nil { + in, out := &in.PodLabels, &out.PodLabels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.PodAnnotations != nil { + in, out := &in.PodAnnotations, &out.PodAnnotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.AdditionalVolumes != nil { + in, out := &in.AdditionalVolumes, &out.AdditionalVolumes + *out = make([]v1.Volume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AdditionalVolumeMounts != nil { + in, out := &in.AdditionalVolumeMounts, &out.AdditionalVolumeMounts + *out = make([]v1.VolumeMount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]v1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VPA != nil { + in, out := &in.VPA, &out.VPA + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new 
GardenletDeployment. +func (in *GardenletDeployment) DeepCopy() *GardenletDeployment { + if in == nil { + return nil + } + out := new(GardenletDeployment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Image) DeepCopyInto(out *Image) { + *out = *in + if in.Repository != nil { + in, out := &in.Repository, &out.Repository + *out = new(string) + **out = **in + } + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(string) + **out = **in + } + if in.PullPolicy != nil { + in, out := &in.PullPolicy, &out.PullPolicy + *out = new(v1.PullPolicy) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Image. +func (in *Image) DeepCopy() *Image { + if in == nil { + return nil + } + out := new(Image) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedSeed) DeepCopyInto(out *ManagedSeed) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedSeed. +func (in *ManagedSeed) DeepCopy() *ManagedSeed { + if in == nil { + return nil + } + out := new(ManagedSeed) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ManagedSeed) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedSeedList) DeepCopyInto(out *ManagedSeedList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ManagedSeed, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedSeedList. +func (in *ManagedSeedList) DeepCopy() *ManagedSeedList { + if in == nil { + return nil + } + out := new(ManagedSeedList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ManagedSeedList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedSeedSpec) DeepCopyInto(out *ManagedSeedSpec) { + *out = *in + out.Shoot = in.Shoot + if in.SeedTemplate != nil { + in, out := &in.SeedTemplate, &out.SeedTemplate + *out = new(v1beta1.SeedTemplate) + (*in).DeepCopyInto(*out) + } + if in.Gardenlet != nil { + in, out := &in.Gardenlet, &out.Gardenlet + *out = new(Gardenlet) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedSeedSpec. 
+func (in *ManagedSeedSpec) DeepCopy() *ManagedSeedSpec { + if in == nil { + return nil + } + out := new(ManagedSeedSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedSeedStatus) DeepCopyInto(out *ManagedSeedStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1beta1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedSeedStatus. +func (in *ManagedSeedStatus) DeepCopy() *ManagedSeedStatus { + if in == nil { + return nil + } + out := new(ManagedSeedStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Shoot) DeepCopyInto(out *Shoot) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Shoot. +func (in *Shoot) DeepCopy() *Shoot { + if in == nil { + return nil + } + out := new(Shoot) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/seedmanagement/v1alpha1/zz_generated.defaults.go b/vendor/github.com/gardener/gardener/pkg/apis/seedmanagement/v1alpha1/zz_generated.defaults.go new file mode 100644 index 000000000..e454d4413 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/seedmanagement/v1alpha1/zz_generated.defaults.go @@ -0,0 +1,53 @@ +// +build !ignore_autogenerated + +/* +Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by defaulter-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. 
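Illustrative sketch (not part of the generated file): the defaulters registered below are normally exercised through a runtime.Scheme. This assumes the package's conventional AddToScheme (defined in its register.go, added elsewhere in this patch) wires in RegisterDefaults; which concrete values get filled in is decided by SetDefaults_ManagedSeed and friends, which are not shown in this hunk.

package main

import (
	"fmt"

	seedmanagementv1alpha1 "github.com/gardener/gardener/pkg/apis/seedmanagement/v1alpha1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()
	// Assumption: AddToScheme registers the types together with the generated
	// defaulting and conversion functions, as is conventional for Gardener APIs.
	if err := seedmanagementv1alpha1.AddToScheme(scheme); err != nil {
		panic(err)
	}

	managedSeed := &seedmanagementv1alpha1.ManagedSeed{
		Spec: seedmanagementv1alpha1.ManagedSeedSpec{
			Gardenlet: &seedmanagementv1alpha1.Gardenlet{
				Deployment: &seedmanagementv1alpha1.GardenletDeployment{},
			},
		},
	}

	// Default dispatches to SetObjectDefaults_ManagedSeed, which in turn defaults
	// the nested GardenletDeployment (and its Image, if one is set).
	scheme.Default(managedSeed)
	fmt.Printf("%+v\n", managedSeed.Spec.Gardenlet.Deployment)
}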
+func RegisterDefaults(scheme *runtime.Scheme) error { + scheme.AddTypeDefaultingFunc(&ManagedSeed{}, func(obj interface{}) { SetObjectDefaults_ManagedSeed(obj.(*ManagedSeed)) }) + scheme.AddTypeDefaultingFunc(&ManagedSeedList{}, func(obj interface{}) { SetObjectDefaults_ManagedSeedList(obj.(*ManagedSeedList)) }) + return nil +} + +func SetObjectDefaults_ManagedSeed(in *ManagedSeed) { + SetDefaults_ManagedSeed(in) + if in.Spec.Gardenlet != nil { + if in.Spec.Gardenlet.Deployment != nil { + SetDefaults_GardenletDeployment(in.Spec.Gardenlet.Deployment) + if in.Spec.Gardenlet.Deployment.Image != nil { + SetDefaults_Image(in.Spec.Gardenlet.Deployment.Image) + } + } + } +} + +func SetObjectDefaults_ManagedSeedList(in *ManagedSeedList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_ManagedSeed(a) + } +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/seedmanagement/zz_generated.deepcopy.go b/vendor/github.com/gardener/gardener/pkg/apis/seedmanagement/zz_generated.deepcopy.go new file mode 100644 index 000000000..de7376993 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/seedmanagement/zz_generated.deepcopy.go @@ -0,0 +1,300 @@ +// +build !ignore_autogenerated + +/* +Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package seedmanagement + +import ( + core "github.com/gardener/gardener/pkg/apis/core" + v1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Gardenlet) DeepCopyInto(out *Gardenlet) { + *out = *in + if in.Deployment != nil { + in, out := &in.Deployment, &out.Deployment + *out = new(GardenletDeployment) + (*in).DeepCopyInto(*out) + } + if in.Config != nil { + out.Config = in.Config.DeepCopyObject() + } + if in.Bootstrap != nil { + in, out := &in.Bootstrap, &out.Bootstrap + *out = new(Bootstrap) + **out = **in + } + if in.MergeWithParent != nil { + in, out := &in.MergeWithParent, &out.MergeWithParent + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Gardenlet. +func (in *Gardenlet) DeepCopy() *Gardenlet { + if in == nil { + return nil + } + out := new(Gardenlet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GardenletDeployment) DeepCopyInto(out *GardenletDeployment) { + *out = *in + if in.ReplicaCount != nil { + in, out := &in.ReplicaCount, &out.ReplicaCount + *out = new(int32) + **out = **in + } + if in.RevisionHistoryLimit != nil { + in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit + *out = new(int32) + **out = **in + } + if in.ServiceAccountName != nil { + in, out := &in.ServiceAccountName, &out.ServiceAccountName + *out = new(string) + **out = **in + } + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(Image) + (*in).DeepCopyInto(*out) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(v1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.PodLabels != nil { + in, out := &in.PodLabels, &out.PodLabels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.PodAnnotations != nil { + in, out := &in.PodAnnotations, &out.PodAnnotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.AdditionalVolumes != nil { + in, out := &in.AdditionalVolumes, &out.AdditionalVolumes + *out = make([]v1.Volume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AdditionalVolumeMounts != nil { + in, out := &in.AdditionalVolumeMounts, &out.AdditionalVolumeMounts + *out = make([]v1.VolumeMount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]v1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VPA != nil { + in, out := &in.VPA, &out.VPA + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GardenletDeployment. +func (in *GardenletDeployment) DeepCopy() *GardenletDeployment { + if in == nil { + return nil + } + out := new(GardenletDeployment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Image) DeepCopyInto(out *Image) { + *out = *in + if in.Repository != nil { + in, out := &in.Repository, &out.Repository + *out = new(string) + **out = **in + } + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(string) + **out = **in + } + if in.PullPolicy != nil { + in, out := &in.PullPolicy, &out.PullPolicy + *out = new(v1.PullPolicy) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Image. +func (in *Image) DeepCopy() *Image { + if in == nil { + return nil + } + out := new(Image) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedSeed) DeepCopyInto(out *ManagedSeed) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedSeed. +func (in *ManagedSeed) DeepCopy() *ManagedSeed { + if in == nil { + return nil + } + out := new(ManagedSeed) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ManagedSeed) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedSeedList) DeepCopyInto(out *ManagedSeedList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ManagedSeed, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedSeedList. +func (in *ManagedSeedList) DeepCopy() *ManagedSeedList { + if in == nil { + return nil + } + out := new(ManagedSeedList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ManagedSeedList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedSeedSpec) DeepCopyInto(out *ManagedSeedSpec) { + *out = *in + out.Shoot = in.Shoot + if in.SeedTemplate != nil { + in, out := &in.SeedTemplate, &out.SeedTemplate + *out = new(core.SeedTemplate) + (*in).DeepCopyInto(*out) + } + if in.Gardenlet != nil { + in, out := &in.Gardenlet, &out.Gardenlet + *out = new(Gardenlet) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedSeedSpec. +func (in *ManagedSeedSpec) DeepCopy() *ManagedSeedSpec { + if in == nil { + return nil + } + out := new(ManagedSeedSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedSeedStatus) DeepCopyInto(out *ManagedSeedStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]core.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedSeedStatus. +func (in *ManagedSeedStatus) DeepCopy() *ManagedSeedStatus { + if in == nil { + return nil + } + out := new(ManagedSeedStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Shoot) DeepCopyInto(out *Shoot) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Shoot. +func (in *Shoot) DeepCopy() *Shoot { + if in == nil { + return nil + } + out := new(Shoot) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/settings/doc.go b/vendor/github.com/gardener/gardener/pkg/apis/settings/doc.go new file mode 100644 index 000000000..61b8fbc26 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/settings/doc.go @@ -0,0 +1,19 @@ +// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +k8s:deepcopy-gen=package + +// Package settings is the internal version of the API. +// +groupName=settings.gardener.cloud +package settings diff --git a/vendor/github.com/gardener/gardener/pkg/apis/settings/register.go b/vendor/github.com/gardener/gardener/pkg/apis/settings/register.go new file mode 100644 index 000000000..a87296094 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/settings/register.go @@ -0,0 +1,54 @@ +// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package settings + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the name of the settings API group. +const GroupName = "settings.gardener.cloud" + +// SchemeGroupVersion is group version used to register these objects. +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns back a Group qualified GroupKind. +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns back a Group qualified GroupResource. +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder is a new Scheme Builder which registers our API. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // AddToScheme is a reference to the Scheme Builder's AddToScheme function. + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &ClusterOpenIDConnectPreset{}, + &ClusterOpenIDConnectPresetList{}, + &OpenIDConnectPreset{}, + &OpenIDConnectPresetList{}, + ) + return nil +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/settings/types_cluster_openidconnect_preset.go b/vendor/github.com/gardener/gardener/pkg/apis/settings/types_cluster_openidconnect_preset.go new file mode 100644 index 000000000..45d1a77e2 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/settings/types_cluster_openidconnect_preset.go @@ -0,0 +1,69 @@ +// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 
2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package settings
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ClusterOpenIDConnectPreset is an OpenID Connect configuration that is applied
+// to Shoot objects cluster-wide.
+type ClusterOpenIDConnectPreset struct {
+ metav1.TypeMeta
+ // Standard object metadata.
+ metav1.ObjectMeta
+ // Spec is the specification of this OpenIDConnect preset.
+ Spec ClusterOpenIDConnectPresetSpec
+}
+
+// ClusterOpenIDConnectPresetSpec contains the OpenIDConnect specification and
+// project selector matching Shoots in Projects.
+type ClusterOpenIDConnectPresetSpec struct {
+ OpenIDConnectPresetSpec
+
+ // ProjectSelector decides whether to apply the configuration if the
+ // Shoot is in a specific Project matching the label selector.
+ // Use the selector only if the OIDC Preset is opt-in, because end
+ // users may skip the admission by setting the labels.
+ // Defaults to the empty LabelSelector, which matches everything.
+ ProjectSelector *metav1.LabelSelector
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ClusterOpenIDConnectPresetList is a collection of ClusterOpenIDConnectPresets.
+type ClusterOpenIDConnectPresetList struct {
+ metav1.TypeMeta
+ // Standard list object metadata.
+ metav1.ListMeta
+ // Items is the list of ClusterOpenIDConnectPresets.
+ Items []ClusterOpenIDConnectPreset
+}
+
+var _ Preset = &ClusterOpenIDConnectPreset{}
+
+// GetPresetSpec returns a pointer to the OpenIDConnect specification.
+func (o *ClusterOpenIDConnectPreset) GetPresetSpec() *OpenIDConnectPresetSpec {
+ return &o.Spec.OpenIDConnectPresetSpec
+}
+
+// SetPresetSpec sets the OpenIDConnect specification.
+func (o *ClusterOpenIDConnectPreset) SetPresetSpec(s *OpenIDConnectPresetSpec) {
+ o.Spec.OpenIDConnectPresetSpec = *s
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/settings/types_openidconnect_preset.go b/vendor/github.com/gardener/gardener/pkg/apis/settings/types_openidconnect_preset.go
new file mode 100644
index 000000000..5871f8735
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/settings/types_openidconnect_preset.go
@@ -0,0 +1,55 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package settings
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// OpenIDConnectPreset is an OpenID Connect configuration that is applied
+// to a Shoot in a namespace.
+type OpenIDConnectPreset struct {
+ metav1.TypeMeta
+ // Standard object metadata.
+ metav1.ObjectMeta
+
+ Spec OpenIDConnectPresetSpec
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// OpenIDConnectPresetList is a collection of OpenIDConnectPresets.
+type OpenIDConnectPresetList struct {
+ metav1.TypeMeta
+ // Standard list object metadata.
+ metav1.ListMeta
+ // Items is the list of OpenIDConnectPresets.
+ Items []OpenIDConnectPreset
+}
+
+var _ Preset = &OpenIDConnectPreset{}
+
+// GetPresetSpec returns a pointer to the OpenIDConnect specification.
+func (o *OpenIDConnectPreset) GetPresetSpec() *OpenIDConnectPresetSpec {
+ return &o.Spec
+}
+
+// SetPresetSpec sets the OpenIDConnect specification.
+func (o *OpenIDConnectPreset) SetPresetSpec(s *OpenIDConnectPresetSpec) {
+ o.Spec = *s
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/settings/types_shared.go b/vendor/github.com/gardener/gardener/pkg/apis/settings/types_shared.go
new file mode 100644
index 000000000..849a9ee87
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/settings/types_shared.go
@@ -0,0 +1,93 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package settings
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// OpenIDConnectPresetSpec contains the Shoot selector for which
+// a specific OpenID Connect configuration is applied.
+type OpenIDConnectPresetSpec struct {
+
+ // Server contains the kube-apiserver's OpenID Connect configuration.
+ // This configuration does not overwrite any existing OpenID Connect
+ // configuration already set on the Shoot object.
+ Server KubeAPIServerOpenIDConnect
+
+ // Client contains the configuration used for client OIDC authentication
+ // of Shoot clusters.
+ // This configuration does not overwrite any existing OpenID Connect
+ // client authentication already set on the Shoot object.
+ Client *OpenIDConnectClientAuthentication
+
+ // ShootSelector decides whether to apply the configuration if the
+ // Shoot has matching labels.
+ // Use the selector only if the OIDC Preset is opt-in, because end
+ // users may skip the admission by setting the labels.
+ // Defaults to the empty LabelSelector, which matches everything.
+ ShootSelector *metav1.LabelSelector
+
+ // Weight associated with matching the corresponding preset,
+ // in the range 1-100.
+ // Required.
+ Weight int32
+}
+
+// This is copied here to avoid depending on the gardener package and to keep it version-agnostic.
+
+// KubeAPIServerOpenIDConnect contains configuration settings for the OIDC provider.
+// Note: Descriptions were taken from the Kubernetes documentation.
+type KubeAPIServerOpenIDConnect struct {
+ // If set, the OpenID server's certificate will be verified by one of the authorities in the oidc-ca-file, otherwise the host's root CA set will be used.
+ CABundle *string
+ // The client ID for the OpenID Connect client.
+ // Required.
+ ClientID string
+ // If provided, the name of a custom OpenID Connect claim for specifying user groups. The claim value is expected to be a string or array of strings. This flag is experimental, please see the authentication documentation for further details.
+ GroupsClaim *string
+ // If provided, all groups will be prefixed with this value to prevent conflicts with other authentication strategies.
+ GroupsPrefix *string
+ // The URL of the OpenID issuer, only HTTPS scheme will be accepted. If set, it will be used to verify the OIDC JSON Web Token (JWT).
+ // Required.
+ IssuerURL string
+ // ATTENTION: Only meaningful for Kubernetes >= 1.11
+ // key=value pairs that describe a required claim in the ID Token. If set, the claim is verified to be present in the ID Token with a matching value.
+ RequiredClaims map[string]string
+ // List of allowed JOSE asymmetric signing algorithms. JWTs with an 'alg' header value not in this list will be rejected. Values are defined by RFC 7518 https://tools.ietf.org/html/rfc7518#section-3.1
+ SigningAlgs []string
+ // The OpenID claim to use as the user name. Note that claims other than the default ('sub') are not guaranteed to be unique and immutable. This flag is experimental, please see the authentication documentation for further details. (default "sub")
+ UsernameClaim *string
+ // If provided, all usernames will be prefixed with this value. If not provided, username claims other than 'email' are prefixed by the issuer URL to avoid clashes. To skip any prefixing, provide the value '-'.
+ UsernamePrefix *string
+}
+
+// OpenIDConnectClientAuthentication contains configuration for OIDC clients.
+type OpenIDConnectClientAuthentication struct {
+ // The client Secret for the OpenID Connect client.
+ Secret *string
+
+ // Extra configuration added to kubeconfig's auth-provider.
+ // Must not be any of idp-issuer-url, client-id, client-secret, idp-certificate-authority, idp-certificate-authority-data, id-token or refresh-token
+ ExtraConfig map[string]string
+}
+
+// Preset offers access to the specification of an OpenID preset object.
+// Mainly used for tests.
+type Preset interface {
+ metav1.ObjectMetaAccessor
+ GetPresetSpec() *OpenIDConnectPresetSpec
+ SetPresetSpec(s *OpenIDConnectPresetSpec)
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/settings/v1alpha1/defaults.go b/vendor/github.com/gardener/gardener/pkg/apis/settings/v1alpha1/defaults.go
new file mode 100644
index 000000000..8e91ce30a
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/settings/v1alpha1/defaults.go
@@ -0,0 +1,55 @@
+// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +func addDefaultingFuncs(scheme *runtime.Scheme) error { + return RegisterDefaults(scheme) +} + +// SetDefaults_OpenIDConnectPreset sets default values for OpenIDConnectPreset objects. +func SetDefaults_OpenIDConnectPreset(obj *OpenIDConnectPreset) { + if obj.Spec.ShootSelector == nil { + obj.Spec.ShootSelector = &metav1.LabelSelector{} + } + setDefaultServerSpec(&obj.Spec.Server) +} + +// SetDefaults_ClusterOpenIDConnectPreset sets default values for ClusterOpenIDConnectPreset objects. +func SetDefaults_ClusterOpenIDConnectPreset(obj *ClusterOpenIDConnectPreset) { + if obj.Spec.ShootSelector == nil { + obj.Spec.ShootSelector = &metav1.LabelSelector{} + } + + if obj.Spec.ProjectSelector == nil { + obj.Spec.ProjectSelector = &metav1.LabelSelector{} + } + setDefaultServerSpec(&obj.Spec.Server) +} + +func setDefaultServerSpec(spec *KubeAPIServerOpenIDConnect) { + if len(spec.SigningAlgs) == 0 { + spec.SigningAlgs = []string{DefaultSignAlg} + } + + if spec.UsernameClaim == nil { + usernameClaim := DefaultUsernameClaim + spec.UsernameClaim = &usernameClaim + } +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/settings/v1alpha1/doc.go b/vendor/github.com/gardener/gardener/pkg/apis/settings/v1alpha1/doc.go new file mode 100644 index 000000000..7ce0b2eef --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/settings/v1alpha1/doc.go @@ -0,0 +1,26 @@ +// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package v1alpha1 is the v1alpha1 version of the API. +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=github.com/gardener/gardener/pkg/apis/settings +// +k8s:openapi-gen=true +// +k8s:defaulter-gen=TypeMeta +// +k8s:protobuf-gen=package + +//go:generate gen-crd-api-reference-docs -api-dir . -config ../../../../hack/api-reference/settings-config.json -template-dir ../../../../hack/api-reference/template -out-file ../../../../hack/api-reference/settings.md + +// Package v1alpha1 is a version of the API. 
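Illustrative sketch (not part of the vendored sources): the defaulting in defaults.go above fills in an empty shoot selector, the default signing algorithm and the default username claim. This assumes the versioned OpenIDConnectPreset type (added elsewhere in this patch) mirrors the internal settings type shown earlier, and that DefaultSignAlg and DefaultUsernameClaim are the constants referenced by setDefaultServerSpec.

package main

import (
	"fmt"

	settingsv1alpha1 "github.com/gardener/gardener/pkg/apis/settings/v1alpha1"
)

func main() {
	preset := &settingsv1alpha1.OpenIDConnectPreset{}
	// ClientID and IssuerURL are required by the API; the values are hypothetical.
	preset.Spec.Server.ClientID = "dashboard"
	preset.Spec.Server.IssuerURL = "https://issuer.example.com"

	settingsv1alpha1.SetDefaults_OpenIDConnectPreset(preset)

	// After defaulting: ShootSelector is the empty selector (matches everything),
	// SigningAlgs contains the default algorithm, and UsernameClaim is set to the
	// default claim.
	fmt.Println(preset.Spec.ShootSelector != nil, preset.Spec.Server.SigningAlgs, *preset.Spec.Server.UsernameClaim)
}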
+// +groupName=settings.gardener.cloud +package v1alpha1 diff --git a/vendor/github.com/gardener/gardener/pkg/apis/settings/v1alpha1/generated.pb.go b/vendor/github.com/gardener/gardener/pkg/apis/settings/v1alpha1/generated.pb.go new file mode 100644 index 000000000..8d7d4e915 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/settings/v1alpha1/generated.pb.go @@ -0,0 +1,2621 @@ +/* +Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/gardener/gardener/pkg/apis/settings/v1alpha1/generated.proto + +package v1alpha1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *ClusterOpenIDConnectPreset) Reset() { *m = ClusterOpenIDConnectPreset{} } +func (*ClusterOpenIDConnectPreset) ProtoMessage() {} +func (*ClusterOpenIDConnectPreset) Descriptor() ([]byte, []int) { + return fileDescriptor_f0cd3f80cc90ed56, []int{0} +} +func (m *ClusterOpenIDConnectPreset) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterOpenIDConnectPreset) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterOpenIDConnectPreset) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterOpenIDConnectPreset.Merge(m, src) +} +func (m *ClusterOpenIDConnectPreset) XXX_Size() int { + return m.Size() +} +func (m *ClusterOpenIDConnectPreset) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterOpenIDConnectPreset.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterOpenIDConnectPreset proto.InternalMessageInfo + +func (m *ClusterOpenIDConnectPresetList) Reset() { *m = ClusterOpenIDConnectPresetList{} } +func (*ClusterOpenIDConnectPresetList) ProtoMessage() {} +func (*ClusterOpenIDConnectPresetList) Descriptor() ([]byte, []int) { + return fileDescriptor_f0cd3f80cc90ed56, []int{1} +} +func (m *ClusterOpenIDConnectPresetList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterOpenIDConnectPresetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterOpenIDConnectPresetList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterOpenIDConnectPresetList.Merge(m, src) +} +func (m *ClusterOpenIDConnectPresetList) XXX_Size() int { + return m.Size() +} +func (m *ClusterOpenIDConnectPresetList) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterOpenIDConnectPresetList.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterOpenIDConnectPresetList proto.InternalMessageInfo + +func (m *ClusterOpenIDConnectPresetSpec) Reset() { *m = ClusterOpenIDConnectPresetSpec{} } +func (*ClusterOpenIDConnectPresetSpec) ProtoMessage() {} +func (*ClusterOpenIDConnectPresetSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_f0cd3f80cc90ed56, []int{2} +} +func (m *ClusterOpenIDConnectPresetSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterOpenIDConnectPresetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterOpenIDConnectPresetSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterOpenIDConnectPresetSpec.Merge(m, src) +} +func (m *ClusterOpenIDConnectPresetSpec) XXX_Size() int { + return m.Size() +} +func (m *ClusterOpenIDConnectPresetSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterOpenIDConnectPresetSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterOpenIDConnectPresetSpec proto.InternalMessageInfo + +func (m *KubeAPIServerOpenIDConnect) Reset() { *m = KubeAPIServerOpenIDConnect{} } +func (*KubeAPIServerOpenIDConnect) ProtoMessage() {} +func (*KubeAPIServerOpenIDConnect) Descriptor() ([]byte, []int) { + return fileDescriptor_f0cd3f80cc90ed56, []int{3} +} +func (m *KubeAPIServerOpenIDConnect) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *KubeAPIServerOpenIDConnect) XXX_Marshal(b []byte, 
deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *KubeAPIServerOpenIDConnect) XXX_Merge(src proto.Message) { + xxx_messageInfo_KubeAPIServerOpenIDConnect.Merge(m, src) +} +func (m *KubeAPIServerOpenIDConnect) XXX_Size() int { + return m.Size() +} +func (m *KubeAPIServerOpenIDConnect) XXX_DiscardUnknown() { + xxx_messageInfo_KubeAPIServerOpenIDConnect.DiscardUnknown(m) +} + +var xxx_messageInfo_KubeAPIServerOpenIDConnect proto.InternalMessageInfo + +func (m *OpenIDConnectClientAuthentication) Reset() { *m = OpenIDConnectClientAuthentication{} } +func (*OpenIDConnectClientAuthentication) ProtoMessage() {} +func (*OpenIDConnectClientAuthentication) Descriptor() ([]byte, []int) { + return fileDescriptor_f0cd3f80cc90ed56, []int{4} +} +func (m *OpenIDConnectClientAuthentication) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OpenIDConnectClientAuthentication) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *OpenIDConnectClientAuthentication) XXX_Merge(src proto.Message) { + xxx_messageInfo_OpenIDConnectClientAuthentication.Merge(m, src) +} +func (m *OpenIDConnectClientAuthentication) XXX_Size() int { + return m.Size() +} +func (m *OpenIDConnectClientAuthentication) XXX_DiscardUnknown() { + xxx_messageInfo_OpenIDConnectClientAuthentication.DiscardUnknown(m) +} + +var xxx_messageInfo_OpenIDConnectClientAuthentication proto.InternalMessageInfo + +func (m *OpenIDConnectPreset) Reset() { *m = OpenIDConnectPreset{} } +func (*OpenIDConnectPreset) ProtoMessage() {} +func (*OpenIDConnectPreset) Descriptor() ([]byte, []int) { + return fileDescriptor_f0cd3f80cc90ed56, []int{5} +} +func (m *OpenIDConnectPreset) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OpenIDConnectPreset) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *OpenIDConnectPreset) XXX_Merge(src proto.Message) { + xxx_messageInfo_OpenIDConnectPreset.Merge(m, src) +} +func (m *OpenIDConnectPreset) XXX_Size() int { + return m.Size() +} +func (m *OpenIDConnectPreset) XXX_DiscardUnknown() { + xxx_messageInfo_OpenIDConnectPreset.DiscardUnknown(m) +} + +var xxx_messageInfo_OpenIDConnectPreset proto.InternalMessageInfo + +func (m *OpenIDConnectPresetList) Reset() { *m = OpenIDConnectPresetList{} } +func (*OpenIDConnectPresetList) ProtoMessage() {} +func (*OpenIDConnectPresetList) Descriptor() ([]byte, []int) { + return fileDescriptor_f0cd3f80cc90ed56, []int{6} +} +func (m *OpenIDConnectPresetList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OpenIDConnectPresetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *OpenIDConnectPresetList) XXX_Merge(src proto.Message) { + xxx_messageInfo_OpenIDConnectPresetList.Merge(m, src) +} +func (m *OpenIDConnectPresetList) XXX_Size() int { + return m.Size() +} +func (m *OpenIDConnectPresetList) XXX_DiscardUnknown() { + xxx_messageInfo_OpenIDConnectPresetList.DiscardUnknown(m) +} + +var xxx_messageInfo_OpenIDConnectPresetList proto.InternalMessageInfo + +func (m *OpenIDConnectPresetSpec) Reset() { *m = 
OpenIDConnectPresetSpec{} } +func (*OpenIDConnectPresetSpec) ProtoMessage() {} +func (*OpenIDConnectPresetSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_f0cd3f80cc90ed56, []int{7} +} +func (m *OpenIDConnectPresetSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OpenIDConnectPresetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *OpenIDConnectPresetSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_OpenIDConnectPresetSpec.Merge(m, src) +} +func (m *OpenIDConnectPresetSpec) XXX_Size() int { + return m.Size() +} +func (m *OpenIDConnectPresetSpec) XXX_DiscardUnknown() { + xxx_messageInfo_OpenIDConnectPresetSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_OpenIDConnectPresetSpec proto.InternalMessageInfo + +func init() { + proto.RegisterType((*ClusterOpenIDConnectPreset)(nil), "github.com.gardener.gardener.pkg.apis.settings.v1alpha1.ClusterOpenIDConnectPreset") + proto.RegisterType((*ClusterOpenIDConnectPresetList)(nil), "github.com.gardener.gardener.pkg.apis.settings.v1alpha1.ClusterOpenIDConnectPresetList") + proto.RegisterType((*ClusterOpenIDConnectPresetSpec)(nil), "github.com.gardener.gardener.pkg.apis.settings.v1alpha1.ClusterOpenIDConnectPresetSpec") + proto.RegisterType((*KubeAPIServerOpenIDConnect)(nil), "github.com.gardener.gardener.pkg.apis.settings.v1alpha1.KubeAPIServerOpenIDConnect") + proto.RegisterMapType((map[string]string)(nil), "github.com.gardener.gardener.pkg.apis.settings.v1alpha1.KubeAPIServerOpenIDConnect.RequiredClaimsEntry") + proto.RegisterType((*OpenIDConnectClientAuthentication)(nil), "github.com.gardener.gardener.pkg.apis.settings.v1alpha1.OpenIDConnectClientAuthentication") + proto.RegisterMapType((map[string]string)(nil), "github.com.gardener.gardener.pkg.apis.settings.v1alpha1.OpenIDConnectClientAuthentication.ExtraConfigEntry") + proto.RegisterType((*OpenIDConnectPreset)(nil), "github.com.gardener.gardener.pkg.apis.settings.v1alpha1.OpenIDConnectPreset") + proto.RegisterType((*OpenIDConnectPresetList)(nil), "github.com.gardener.gardener.pkg.apis.settings.v1alpha1.OpenIDConnectPresetList") + proto.RegisterType((*OpenIDConnectPresetSpec)(nil), "github.com.gardener.gardener.pkg.apis.settings.v1alpha1.OpenIDConnectPresetSpec") +} + +func init() { + proto.RegisterFile("github.com/gardener/gardener/pkg/apis/settings/v1alpha1/generated.proto", fileDescriptor_f0cd3f80cc90ed56) +} + +var fileDescriptor_f0cd3f80cc90ed56 = []byte{ + // 957 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x55, 0x4f, 0x6f, 0xe3, 0xc4, + 0x1b, 0x8e, 0x93, 0x26, 0xbf, 0x64, 0xd2, 0x7f, 0xbf, 0x29, 0xa2, 0x51, 0x0e, 0x4e, 0xc9, 0x01, + 0xf5, 0x00, 0x0e, 0x5d, 0x56, 0xec, 0x6a, 0x0f, 0x48, 0x49, 0x76, 0x55, 0x45, 0x14, 0x6d, 0x34, + 0x51, 0xb5, 0xd2, 0x8a, 0x03, 0x8e, 0xf3, 0xd6, 0x19, 0xe2, 0xd8, 0xde, 0x99, 0x71, 0x68, 0x85, + 0x84, 0xf8, 0x08, 0x7c, 0x06, 0x38, 0x70, 0x60, 0x3f, 0x05, 0xa7, 0x1e, 0xf7, 0xb8, 0xa7, 0x88, + 0x9a, 0x1b, 0x7c, 0x01, 0xb8, 0x21, 0x8f, 0x27, 0xb5, 0x9d, 0x36, 0xb0, 0x54, 0xad, 0xf6, 0xe6, + 0x79, 0xff, 0x3d, 0xcf, 0xfb, 0xcc, 0xeb, 0x79, 0xd1, 0xa1, 0x4d, 0xc5, 0x38, 0x18, 0x1a, 0x96, + 0x37, 0x6d, 0xd9, 0x26, 0x1b, 0x81, 0x0b, 0x2c, 0xf9, 0xf0, 0x27, 0x76, 0xcb, 0xf4, 0x29, 0x6f, + 0x71, 0x10, 0x82, 0xba, 0x36, 0x6f, 0xcd, 0x0e, 0x4c, 0xc7, 0x1f, 0x9b, 0x07, 0x2d, 0x3b, 0x0a, + 0x30, 0x05, 0x8c, 0x0c, 0x9f, 0x79, 0xc2, 
0xc3, 0x0f, 0x92, 0x42, 0xc6, 0x22, 0x3f, 0xf9, 0xf0, + 0x27, 0xb6, 0x11, 0x15, 0x32, 0x16, 0x85, 0x8c, 0x45, 0xa1, 0xfa, 0x87, 0x69, 0x06, 0x9e, 0xed, + 0xb5, 0x64, 0xbd, 0x61, 0x70, 0x22, 0x4f, 0xf2, 0x20, 0xbf, 0x62, 0x9c, 0xfa, 0xfd, 0xc9, 0x43, + 0x6e, 0x50, 0x2f, 0xa2, 0x35, 0x35, 0xad, 0x31, 0x75, 0x81, 0x9d, 0x25, 0x3c, 0xa7, 0x20, 0xcc, + 0xd6, 0xec, 0x0a, 0xbb, 0x7a, 0x6b, 0x55, 0x16, 0x0b, 0x5c, 0x41, 0xa7, 0x70, 0x25, 0xe1, 0x93, + 0x7f, 0x4b, 0xe0, 0xd6, 0x18, 0xa6, 0xe6, 0x72, 0x5e, 0xf3, 0x2f, 0x0d, 0xd5, 0xbb, 0x4e, 0xc0, + 0x05, 0xb0, 0xa7, 0x3e, 0xb8, 0xbd, 0xc7, 0x5d, 0xcf, 0x75, 0xc1, 0x12, 0x7d, 0x06, 0x1c, 0x04, + 0xfe, 0x12, 0x95, 0x23, 0x8a, 0x23, 0x53, 0x98, 0x35, 0x6d, 0x4f, 0xdb, 0xaf, 0xde, 0xfb, 0xc8, + 0x88, 0x91, 0x8c, 0x34, 0x52, 0xa2, 0x57, 0x14, 0x6d, 0xcc, 0x0e, 0x8c, 0xa7, 0xc3, 0xaf, 0xc0, + 0x12, 0x9f, 0x83, 0x30, 0x3b, 0xf8, 0x7c, 0xde, 0xc8, 0x85, 0xf3, 0x06, 0x4a, 0x6c, 0xe4, 0xb2, + 0x2a, 0x3e, 0x43, 0x6b, 0xdc, 0x07, 0xab, 0x96, 0x97, 0xd5, 0x9f, 0x19, 0x37, 0xbc, 0x16, 0x63, + 0x75, 0x13, 0x03, 0x1f, 0xac, 0xce, 0xba, 0x22, 0xb1, 0x16, 0x9d, 0x88, 0x84, 0x6c, 0xfe, 0xa9, + 0x21, 0x7d, 0x75, 0xda, 0x11, 0xe5, 0x02, 0x7f, 0x71, 0xa5, 0x7f, 0xe3, 0xcd, 0xfa, 0x8f, 0xb2, + 0x65, 0xf7, 0xdb, 0x0a, 0xb8, 0xbc, 0xb0, 0xa4, 0x7a, 0x3f, 0x45, 0x45, 0x2a, 0x60, 0xca, 0x6b, + 0xf9, 0xbd, 0xc2, 0x7e, 0xf5, 0xde, 0xe0, 0x0e, 0x9a, 0xef, 0x6c, 0x28, 0xfc, 0x62, 0x2f, 0x42, + 0x22, 0x31, 0x60, 0xf3, 0x97, 0xfc, 0x3f, 0xb5, 0x1e, 0x69, 0x84, 0x7f, 0xd6, 0xd0, 0xae, 0x77, + 0xbd, 0x4f, 0x49, 0xd1, 0xbf, 0x31, 0xdf, 0x55, 0xb7, 0xd4, 0x50, 0x64, 0x77, 0x57, 0x04, 0x90, + 0x55, 0x8c, 0x30, 0x43, 0x5b, 0x3e, 0xf3, 0xa2, 0xf9, 0x1a, 0x80, 0x03, 0x96, 0xf0, 0x98, 0x9a, + 0xa8, 0x8f, 0xdf, 0xf0, 0xbe, 0xcc, 0x21, 0x38, 0x8b, 0xd4, 0xce, 0x4e, 0x38, 0x6f, 0x6c, 0xf5, + 0xb3, 0xf5, 0xc8, 0x32, 0x40, 0xf3, 0xc7, 0x22, 0xaa, 0x7f, 0x16, 0x0c, 0xa1, 0xdd, 0xef, 0x0d, + 0x80, 0xcd, 0x96, 0xa4, 0xc4, 0xfb, 0xa8, 0x6c, 0x99, 0x9d, 0xc0, 0x1d, 0x39, 0x20, 0x05, 0xab, + 0x74, 0xd6, 0xa3, 0x39, 0xe8, 0xb6, 0x63, 0x1b, 0xb9, 0xf4, 0xe2, 0x0f, 0x50, 0xd9, 0x72, 0x28, + 0xb8, 0xa2, 0xf7, 0x58, 0xb2, 0xae, 0x24, 0x53, 0xd3, 0x55, 0x76, 0x72, 0x19, 0x81, 0x0f, 0x50, + 0xd5, 0x66, 0x5e, 0xe0, 0xf3, 0xae, 0x63, 0xd2, 0x69, 0xad, 0x20, 0x13, 0xb6, 0xc2, 0x79, 0xa3, + 0x7a, 0x98, 0x98, 0x49, 0x3a, 0x06, 0xdf, 0x47, 0xeb, 0xf1, 0xb1, 0xcf, 0xe0, 0x84, 0x9e, 0xd6, + 0xd6, 0x62, 0x90, 0x70, 0xde, 0x58, 0x3f, 0x4c, 0xd9, 0x49, 0x26, 0x0a, 0xb7, 0x50, 0x85, 0x72, + 0x1e, 0x00, 0x3b, 0x26, 0x47, 0xb5, 0xa2, 0x4c, 0xf9, 0xbf, 0xe2, 0x55, 0xe9, 0x2d, 0x1c, 0x24, + 0x89, 0xc1, 0x3f, 0x69, 0x68, 0x93, 0xc1, 0x8b, 0x80, 0x32, 0x18, 0x49, 0x60, 0x5e, 0x2b, 0xc9, + 0xc9, 0xb6, 0x6f, 0x3c, 0x29, 0xab, 0xf5, 0x35, 0x48, 0x06, 0xe9, 0x89, 0x2b, 0xd8, 0x59, 0xe7, + 0x5d, 0xc5, 0x6f, 0x33, 0xeb, 0x24, 0x4b, 0xb4, 0x22, 0x0d, 0x39, 0xb5, 0x5d, 0xea, 0xda, 0x6d, + 0xc7, 0xe6, 0xb5, 0xff, 0xed, 0x15, 0x16, 0x1a, 0x0e, 0x12, 0x33, 0x49, 0xc7, 0xe0, 0x07, 0x68, + 0x23, 0xe0, 0xc0, 0x5c, 0x73, 0x0a, 0xb1, 0xf0, 0xe5, 0x58, 0x91, 0x70, 0xde, 0xd8, 0x38, 0x4e, + 0x3b, 0x48, 0x36, 0x0e, 0x3f, 0x42, 0x9b, 0x0b, 0x83, 0x92, 0xbf, 0x22, 0x33, 0x71, 0xc4, 0xf3, + 0x38, 0xe3, 0x21, 0x4b, 0x91, 0xf5, 0x36, 0xda, 0xb9, 0xa6, 0x4d, 0xbc, 0x8d, 0x0a, 0x13, 0x38, + 0x8b, 0xa7, 0x8a, 0x44, 0x9f, 0xf8, 0x1d, 0x54, 0x9c, 0x99, 0x4e, 0x00, 0xf1, 0xfc, 0x90, 0xf8, + 0xf0, 0x28, 0xff, 0x50, 0x6b, 0xbe, 0xcc, 0xa3, 0xf7, 0x32, 0xc2, 0xc5, 0x23, 0xd5, 0x0e, 0xc4, + 0x18, 0x5c, 0x41, 0x2d, 0x53, 0x50, 0xcf, 0xc5, 0x4d, 0x54, 0xe2, 
0x60, 0x31, 0x10, 0x6a, 0x54, + 0x51, 0x38, 0x6f, 0x94, 0x06, 0xd2, 0x42, 0x94, 0x07, 0xff, 0xa0, 0xa1, 0x2a, 0x9c, 0x0a, 0x66, + 0x76, 0x3d, 0xf7, 0x84, 0xda, 0xea, 0xd5, 0x9a, 0xdc, 0xce, 0x2b, 0x70, 0x1d, 0x2b, 0xe3, 0x49, + 0x82, 0x16, 0xdf, 0xef, 0x8e, 0xba, 0xdf, 0x6a, 0xca, 0x43, 0xd2, 0xa4, 0xea, 0x9f, 0xa2, 0xed, + 0xe5, 0xac, 0xff, 0x24, 0xd7, 0x1f, 0x1a, 0xda, 0x79, 0x3b, 0x9b, 0x90, 0x65, 0x36, 0xe1, 0xed, + 0x3f, 0xae, 0xd7, 0xad, 0xc0, 0xdf, 0x35, 0xb4, 0xfb, 0x76, 0x76, 0xdf, 0x8b, 0xec, 0xee, 0x3b, + 0xba, 0xcd, 0x76, 0x57, 0x2c, 0xbd, 0x97, 0x05, 0xb4, 0x6a, 0xb1, 0xe0, 0x6f, 0xa2, 0xf9, 0x8f, + 0xde, 0x18, 0xd5, 0xea, 0xe0, 0x0e, 0x5e, 0xac, 0xce, 0xa6, 0xa2, 0x55, 0x8a, 0x9d, 0x44, 0x41, + 0xe2, 0x6f, 0x51, 0x29, 0x7e, 0xdd, 0xd5, 0xdd, 0x3f, 0xbf, 0xbb, 0x5f, 0x2a, 0xfe, 0xb1, 0x63, + 0x0f, 0x51, 0xa8, 0xd8, 0x41, 0x1b, 0x7c, 0xec, 0x79, 0xc9, 0xea, 0x2c, 0xdc, 0x7c, 0x75, 0xca, + 0xf7, 0x70, 0x90, 0xae, 0x46, 0xb2, 0xc5, 0xf1, 0xfb, 0xa8, 0xf4, 0x35, 0x50, 0x7b, 0x2c, 0xe4, + 0x1a, 0x2a, 0x26, 0xaa, 0x3c, 0x93, 0x56, 0xa2, 0xbc, 0x1d, 0xe3, 0xfc, 0x42, 0xcf, 0xbd, 0xba, + 0xd0, 0x73, 0xaf, 0x2f, 0xf4, 0xdc, 0x77, 0xa1, 0xae, 0x9d, 0x87, 0xba, 0xf6, 0x2a, 0xd4, 0xb5, + 0xd7, 0xa1, 0xae, 0xfd, 0x1a, 0xea, 0xda, 0xf7, 0xbf, 0xe9, 0xb9, 0xe7, 0xe5, 0x45, 0xef, 0x7f, + 0x07, 0x00, 0x00, 0xff, 0xff, 0x68, 0xeb, 0xae, 0x3a, 0x1b, 0x0c, 0x00, 0x00, +} + +func (m *ClusterOpenIDConnectPreset) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterOpenIDConnectPreset) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterOpenIDConnectPreset) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ClusterOpenIDConnectPresetList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterOpenIDConnectPresetList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterOpenIDConnectPresetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ClusterOpenIDConnectPresetSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } 
+ return dAtA[:n], nil +} + +func (m *ClusterOpenIDConnectPresetSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterOpenIDConnectPresetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ProjectSelector != nil { + { + size, err := m.ProjectSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + { + size, err := m.OpenIDConnectPresetSpec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *KubeAPIServerOpenIDConnect) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KubeAPIServerOpenIDConnect) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *KubeAPIServerOpenIDConnect) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.UsernamePrefix != nil { + i -= len(*m.UsernamePrefix) + copy(dAtA[i:], *m.UsernamePrefix) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.UsernamePrefix))) + i-- + dAtA[i] = 0x4a + } + if m.UsernameClaim != nil { + i -= len(*m.UsernameClaim) + copy(dAtA[i:], *m.UsernameClaim) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.UsernameClaim))) + i-- + dAtA[i] = 0x42 + } + if len(m.SigningAlgs) > 0 { + for iNdEx := len(m.SigningAlgs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.SigningAlgs[iNdEx]) + copy(dAtA[i:], m.SigningAlgs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SigningAlgs[iNdEx]))) + i-- + dAtA[i] = 0x3a + } + } + if len(m.RequiredClaims) > 0 { + keysForRequiredClaims := make([]string, 0, len(m.RequiredClaims)) + for k := range m.RequiredClaims { + keysForRequiredClaims = append(keysForRequiredClaims, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForRequiredClaims) + for iNdEx := len(keysForRequiredClaims) - 1; iNdEx >= 0; iNdEx-- { + v := m.RequiredClaims[string(keysForRequiredClaims[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForRequiredClaims[iNdEx]) + copy(dAtA[i:], keysForRequiredClaims[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForRequiredClaims[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x32 + } + } + i -= len(m.IssuerURL) + copy(dAtA[i:], m.IssuerURL) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IssuerURL))) + i-- + dAtA[i] = 0x2a + if m.GroupsPrefix != nil { + i -= len(*m.GroupsPrefix) + copy(dAtA[i:], *m.GroupsPrefix) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.GroupsPrefix))) + i-- + dAtA[i] = 0x22 + } + if m.GroupsClaim != nil { + i -= len(*m.GroupsClaim) + copy(dAtA[i:], *m.GroupsClaim) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.GroupsClaim))) + i-- + dAtA[i] = 0x1a + } + i -= len(m.ClientID) + copy(dAtA[i:], m.ClientID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClientID))) + i-- + dAtA[i] = 0x12 + if m.CABundle != nil { + i -= len(*m.CABundle) + copy(dAtA[i:], *m.CABundle) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.CABundle))) + i-- + dAtA[i] = 0xa 
+ } + return len(dAtA) - i, nil +} + +func (m *OpenIDConnectClientAuthentication) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OpenIDConnectClientAuthentication) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *OpenIDConnectClientAuthentication) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ExtraConfig) > 0 { + keysForExtraConfig := make([]string, 0, len(m.ExtraConfig)) + for k := range m.ExtraConfig { + keysForExtraConfig = append(keysForExtraConfig, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForExtraConfig) + for iNdEx := len(keysForExtraConfig) - 1; iNdEx >= 0; iNdEx-- { + v := m.ExtraConfig[string(keysForExtraConfig[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForExtraConfig[iNdEx]) + copy(dAtA[i:], keysForExtraConfig[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForExtraConfig[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if m.Secret != nil { + i -= len(*m.Secret) + copy(dAtA[i:], *m.Secret) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Secret))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *OpenIDConnectPreset) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OpenIDConnectPreset) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *OpenIDConnectPreset) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *OpenIDConnectPresetList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OpenIDConnectPresetList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *OpenIDConnectPresetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *OpenIDConnectPresetSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, 
size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OpenIDConnectPresetSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *OpenIDConnectPresetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.Weight)) + i-- + dAtA[i] = 0x20 + if m.ShootSelector != nil { + { + size, err := m.ShootSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Client != nil { + { + size, err := m.Client.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + { + size, err := m.Server.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ClusterOpenIDConnectPreset) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ClusterOpenIDConnectPresetList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ClusterOpenIDConnectPresetSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.OpenIDConnectPresetSpec.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.ProjectSelector != nil { + l = m.ProjectSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *KubeAPIServerOpenIDConnect) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CABundle != nil { + l = len(*m.CABundle) + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.ClientID) + n += 1 + l + sovGenerated(uint64(l)) + if m.GroupsClaim != nil { + l = len(*m.GroupsClaim) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.GroupsPrefix != nil { + l = len(*m.GroupsPrefix) + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.IssuerURL) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.RequiredClaims) > 0 { + for k, v := range m.RequiredClaims { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.SigningAlgs) > 0 { + for _, s := range m.SigningAlgs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.UsernameClaim != nil { + l = len(*m.UsernameClaim) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.UsernamePrefix != nil { + l = len(*m.UsernamePrefix) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *OpenIDConnectClientAuthentication) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Secret != nil { + l = len(*m.Secret) + n += 1 + l + sovGenerated(uint64(l)) + } 
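The Size methods above, together with encodeVarintGenerated and the sovGenerated helper they rely on, implement ordinary protobuf wire encoding: each length-delimited field costs one tag byte, a varint length prefix, and the payload, which is where the recurring pattern n += 1 + l + sovGenerated(uint64(l)) comes from. The following standalone sketch reproduces that arithmetic independently of the generated code; the sample value is made up.

package main

import (
	"fmt"
	"math/bits"
)

// varintSize mirrors sovGenerated: the number of bytes needed to store x
// as a protobuf varint, at 7 payload bits per byte.
func varintSize(x uint64) int {
	return (bits.Len64(x|1) + 6) / 7
}

// appendVarint mirrors the loop in encodeVarintGenerated, but appends
// front-to-back instead of writing into a pre-sized buffer back-to-front.
func appendVarint(b []byte, v uint64) []byte {
	for v >= 1<<7 {
		b = append(b, byte(v&0x7f|0x80))
		v >>= 7
	}
	return append(b, byte(v))
}

func main() {
	payload := "example-client" // hypothetical ClientID value
	l := uint64(len(payload))

	// A length-delimited field costs: 1 tag byte + varint(length) + payload bytes,
	// exactly the "n += 1 + l + sovGenerated(uint64(l))" pattern in the Size methods.
	fmt.Println(1 + int(l) + varintSize(l)) // 16 for a 14-byte string

	// Tag byte for field 2, wire type 2 (length-delimited): 2<<3 | 2 = 0x12,
	// the constant the generated Marshal code writes before ClientID.
	buf := []byte{0x12}
	buf = appendVarint(buf, l)
	buf = append(buf, payload...)
	fmt.Printf("% x\n", buf)
}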
+ if len(m.ExtraConfig) > 0 { + for k, v := range m.ExtraConfig { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *OpenIDConnectPreset) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *OpenIDConnectPresetList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *OpenIDConnectPresetSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Server.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Client != nil { + l = m.Client.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ShootSelector != nil { + l = m.ShootSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 1 + sovGenerated(uint64(m.Weight)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ClusterOpenIDConnectPreset) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterOpenIDConnectPreset{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ClusterOpenIDConnectPresetSpec", "ClusterOpenIDConnectPresetSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ClusterOpenIDConnectPresetList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ClusterOpenIDConnectPreset{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterOpenIDConnectPreset", "ClusterOpenIDConnectPreset", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ClusterOpenIDConnectPresetList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ClusterOpenIDConnectPresetSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterOpenIDConnectPresetSpec{`, + `OpenIDConnectPresetSpec:` + strings.Replace(strings.Replace(this.OpenIDConnectPresetSpec.String(), "OpenIDConnectPresetSpec", "OpenIDConnectPresetSpec", 1), `&`, ``, 1) + `,`, + `ProjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ProjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *KubeAPIServerOpenIDConnect) String() string { + if this == nil { + return "nil" + } + keysForRequiredClaims := make([]string, 0, len(this.RequiredClaims)) + for k := range this.RequiredClaims { + keysForRequiredClaims = append(keysForRequiredClaims, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForRequiredClaims) + mapStringForRequiredClaims := "map[string]string{" + for _, k := range keysForRequiredClaims { + mapStringForRequiredClaims += fmt.Sprintf("%v: %v,", k, 
this.RequiredClaims[k]) + } + mapStringForRequiredClaims += "}" + s := strings.Join([]string{`&KubeAPIServerOpenIDConnect{`, + `CABundle:` + valueToStringGenerated(this.CABundle) + `,`, + `ClientID:` + fmt.Sprintf("%v", this.ClientID) + `,`, + `GroupsClaim:` + valueToStringGenerated(this.GroupsClaim) + `,`, + `GroupsPrefix:` + valueToStringGenerated(this.GroupsPrefix) + `,`, + `IssuerURL:` + fmt.Sprintf("%v", this.IssuerURL) + `,`, + `RequiredClaims:` + mapStringForRequiredClaims + `,`, + `SigningAlgs:` + fmt.Sprintf("%v", this.SigningAlgs) + `,`, + `UsernameClaim:` + valueToStringGenerated(this.UsernameClaim) + `,`, + `UsernamePrefix:` + valueToStringGenerated(this.UsernamePrefix) + `,`, + `}`, + }, "") + return s +} +func (this *OpenIDConnectClientAuthentication) String() string { + if this == nil { + return "nil" + } + keysForExtraConfig := make([]string, 0, len(this.ExtraConfig)) + for k := range this.ExtraConfig { + keysForExtraConfig = append(keysForExtraConfig, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForExtraConfig) + mapStringForExtraConfig := "map[string]string{" + for _, k := range keysForExtraConfig { + mapStringForExtraConfig += fmt.Sprintf("%v: %v,", k, this.ExtraConfig[k]) + } + mapStringForExtraConfig += "}" + s := strings.Join([]string{`&OpenIDConnectClientAuthentication{`, + `Secret:` + valueToStringGenerated(this.Secret) + `,`, + `ExtraConfig:` + mapStringForExtraConfig + `,`, + `}`, + }, "") + return s +} +func (this *OpenIDConnectPreset) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&OpenIDConnectPreset{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "OpenIDConnectPresetSpec", "OpenIDConnectPresetSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *OpenIDConnectPresetList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]OpenIDConnectPreset{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "OpenIDConnectPreset", "OpenIDConnectPreset", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&OpenIDConnectPresetList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *OpenIDConnectPresetSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&OpenIDConnectPresetSpec{`, + `Server:` + strings.Replace(strings.Replace(this.Server.String(), "KubeAPIServerOpenIDConnect", "KubeAPIServerOpenIDConnect", 1), `&`, ``, 1) + `,`, + `Client:` + strings.Replace(this.Client.String(), "OpenIDConnectClientAuthentication", "OpenIDConnectClientAuthentication", 1) + `,`, + `ShootSelector:` + strings.Replace(fmt.Sprintf("%v", this.ShootSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Weight:` + fmt.Sprintf("%v", this.Weight) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ClusterOpenIDConnectPreset) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterOpenIDConnectPreset: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterOpenIDConnectPreset: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterOpenIDConnectPresetList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterOpenIDConnectPresetList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterOpenIDConnectPresetList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l 
{ + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ClusterOpenIDConnectPreset{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterOpenIDConnectPresetSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterOpenIDConnectPresetSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterOpenIDConnectPresetSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OpenIDConnectPresetSpec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.OpenIDConnectPresetSpec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProjectSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ProjectSelector == nil { + m.ProjectSelector = &v1.LabelSelector{} + } + if err := m.ProjectSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := 
skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *KubeAPIServerOpenIDConnect) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KubeAPIServerOpenIDConnect: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KubeAPIServerOpenIDConnect: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CABundle", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.CABundle = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClientID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GroupsClaim", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.GroupsClaim = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GroupsPrefix", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } 
+ } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.GroupsPrefix = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IssuerURL", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IssuerURL = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequiredClaims", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RequiredClaims == nil { + m.RequiredClaims = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := 
skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.RequiredClaims[mapkey] = mapvalue + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SigningAlgs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SigningAlgs = append(m.SigningAlgs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UsernameClaim", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.UsernameClaim = &s + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UsernamePrefix", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.UsernamePrefix = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OpenIDConnectClientAuthentication) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OpenIDConnectClientAuthentication: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OpenIDConnectClientAuthentication: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch 
fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Secret = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExtraConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ExtraConfig == nil { + m.ExtraConfig = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.ExtraConfig[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := 
skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OpenIDConnectPreset) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OpenIDConnectPreset: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OpenIDConnectPreset: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OpenIDConnectPresetList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OpenIDConnectPresetList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OpenIDConnectPresetList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, OpenIDConnectPreset{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OpenIDConnectPresetSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OpenIDConnectPresetSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OpenIDConnectPresetSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Server", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Server.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Client", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Client == nil { + m.Client = &OpenIDConnectClientAuthentication{} + } + if err := m.Client.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ShootSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ShootSelector == nil { + m.ShootSelector = &v1.LabelSelector{} + } + if err := m.ShootSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Weight", wireType) + } + m.Weight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Weight |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git 
a/vendor/github.com/gardener/gardener/pkg/apis/settings/v1alpha1/generated.proto b/vendor/github.com/gardener/gardener/pkg/apis/settings/v1alpha1/generated.proto new file mode 100644 index 000000000..b51771f95 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/settings/v1alpha1/generated.proto @@ -0,0 +1,167 @@ +/* +Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package github.com.gardener.gardener.pkg.apis.settings.v1alpha1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1alpha1"; + +// ClusterOpenIDConnectPreset is a OpenID Connect configuration that is applied +// to a Shoot objects cluster-wide. +message ClusterOpenIDConnectPreset { + // Standard object metadata. + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec is the specification of this OpenIDConnect preset. + optional ClusterOpenIDConnectPresetSpec spec = 2; +} + +// ClusterOpenIDConnectPresetList is a collection of ClusterOpenIDConnectPresets. +message ClusterOpenIDConnectPresetList { + // Standard list object metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of ClusterOpenIDConnectPresets. + repeated ClusterOpenIDConnectPreset items = 2; +} + +// ClusterOpenIDConnectPresetSpec contains the OpenIDConnect specification and +// project selector matching Shoots in Projects. +message ClusterOpenIDConnectPresetSpec { + optional OpenIDConnectPresetSpec openIDConnectPresetSpec = 1; + + // Project decides whether to apply the configuration if the + // Shoot is in a specific Project matching the label selector. + // Use the selector only if the OIDC Preset is opt-in, because end + // users may skip the admission by setting the labels. + // Defaults to the empty LabelSelector, which matches everything. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector projectSelector = 2; +} + +// KubeAPIServerOpenIDConnect contains configuration settings for the OIDC provider. +// Note: Descriptions were taken from the Kubernetes documentation. +message KubeAPIServerOpenIDConnect { + // If set, the OpenID server's certificate will be verified by one of the authorities in the oidc-ca-file, otherwise the host's root CA set will be used. + // +optional + optional string caBundle = 1; + + // The client ID for the OpenID Connect client. + // Required. + optional string clientID = 2; + + // If provided, the name of a custom OpenID Connect claim for specifying user groups. The claim value is expected to be a string or array of strings. 
This field is experimental, please see the authentication documentation for further details. + // +optional + optional string groupsClaim = 3; + + // If provided, all groups will be prefixed with this value to prevent conflicts with other authentication strategies. + // +optional + optional string groupsPrefix = 4; + + // The URL of the OpenID issuer, only HTTPS scheme will be accepted. If set, it will be used to verify the OIDC JSON Web Token (JWT). + // Required. + optional string issuerURL = 5; + + // key=value pairs that describes a required claim in the ID Token. If set, the claim is verified to be present in the ID Token with a matching value. + // Only applied when the Kubernetes version of the Shoot is >= 1.11 + // +optional + map requiredClaims = 6; + + // List of allowed JOSE asymmetric signing algorithms. JWTs with a 'alg' header value not in this list will be rejected. Values are defined by RFC 7518 https://tools.ietf.org/html/rfc7518#section-3.1 + // Defaults to [RS256] + // +optional + repeated string signingAlgs = 7; + + // The OpenID claim to use as the user name. Note that claims other than the default ('sub') is not guaranteed to be unique and immutable. This field is experimental, please see the authentication documentation for further details. + // Defaults to "sub". + // +optional + optional string usernameClaim = 8; + + // If provided, all usernames will be prefixed with this value. If not provided, username claims other than 'email' are prefixed by the issuer URL to avoid clashes. To skip any prefixing, provide the value '-'. + // +optional + optional string usernamePrefix = 9; +} + +// OpenIDConnectClientAuthentication contains configuration for OIDC clients. +message OpenIDConnectClientAuthentication { + // The client Secret for the OpenID Connect client. + // +optional + optional string secret = 1; + + // Extra configuration added to kubeconfig's auth-provider. + // Must not be any of idp-issuer-url, client-id, client-secret, idp-certificate-authority, idp-certificate-authority-data, id-token or refresh-token + // +optional + map extraConfig = 2; +} + +// OpenIDConnectPreset is a OpenID Connect configuration that is applied +// to a Shoot in a namespace. +message OpenIDConnectPreset { + // Standard object metadata. + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec is the specification of this OpenIDConnect preset. + optional OpenIDConnectPresetSpec spec = 2; +} + +// OpenIDConnectPresetList is a collection of OpenIDConnectPresets. +message OpenIDConnectPresetList { + // Standard list object metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of OpenIDConnectPresets. + repeated OpenIDConnectPreset items = 2; +} + +// OpenIDConnectPresetSpec contains the Shoot selector for which +// a specific OpenID Connect configuration is applied. +message OpenIDConnectPresetSpec { + // Server contains the kube-apiserver's OpenID Connect configuration. + // This configuration is not overwritting any existing OpenID Connect + // configuration already set on the Shoot object. + optional KubeAPIServerOpenIDConnect server = 1; + + // Client contains the configuration used for client OIDC authentication + // of Shoot clusters. + // This configuration is not overwritting any existing OpenID Connect + // client authentication already set on the Shoot object. 
+ // +optional + optional OpenIDConnectClientAuthentication client = 2; + + // ShootSelector decides whether to apply the configuration if the + // Shoot has matching labels. + // Use the selector only if the OIDC Preset is opt-in, because end + // users may skip the admission by setting the labels. + // Default to the empty LabelSelector, which matches everything. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector shootSelector = 3; + + // Weight associated with matching the corresponding preset, + // in the range 1-100. + // Required. + optional int32 weight = 4; +} + diff --git a/vendor/github.com/gardener/gardener/pkg/apis/settings/v1alpha1/register.go b/vendor/github.com/gardener/gardener/pkg/apis/settings/v1alpha1/register.go new file mode 100644 index 000000000..c783212b7 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/settings/v1alpha1/register.go @@ -0,0 +1,57 @@ +// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the name of the settings API group. +const GroupName = "settings.gardener.cloud" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind. +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource. +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder is a new Scheme Builder which registers our API. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs) + localSchemeBuilder = &SchemeBuilder + // AddToScheme is a reference to the Scheme Builder's AddToScheme function. + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. 
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &ClusterOpenIDConnectPreset{}, + &ClusterOpenIDConnectPresetList{}, + &OpenIDConnectPreset{}, + &OpenIDConnectPresetList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/settings/v1alpha1/types_cluster_openidconnect_preset.go b/vendor/github.com/gardener/gardener/pkg/apis/settings/v1alpha1/types_cluster_openidconnect_preset.go new file mode 100644 index 000000000..8422e9e77 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/settings/v1alpha1/types_cluster_openidconnect_preset.go @@ -0,0 +1,59 @@ +// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterOpenIDConnectPreset is a OpenID Connect configuration that is applied +// to a Shoot objects cluster-wide. +type ClusterOpenIDConnectPreset struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata. + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Spec is the specification of this OpenIDConnect preset. + Spec ClusterOpenIDConnectPresetSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` +} + +// ClusterOpenIDConnectPresetSpec contains the OpenIDConnect specification and +// project selector matching Shoots in Projects. +type ClusterOpenIDConnectPresetSpec struct { + OpenIDConnectPresetSpec `json:",inline" protobuf:"bytes,1,opt,name=openIDConnectPresetSpec"` + + // Project decides whether to apply the configuration if the + // Shoot is in a specific Project matching the label selector. + // Use the selector only if the OIDC Preset is opt-in, because end + // users may skip the admission by setting the labels. + // Defaults to the empty LabelSelector, which matches everything. + // +optional + ProjectSelector *metav1.LabelSelector `json:"projectSelector,omitempty" protobuf:"bytes,2,opt,name=projectSelector"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterOpenIDConnectPresetList is a collection of ClusterOpenIDConnectPresets. +type ClusterOpenIDConnectPresetList struct { + metav1.TypeMeta `json:",inline"` + // Standard list object metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Items is the list of ClusterOpenIDConnectPresets. 
+ Items []ClusterOpenIDConnectPreset `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/settings/v1alpha1/types_openidconnect_preset.go b/vendor/github.com/gardener/gardener/pkg/apis/settings/v1alpha1/types_openidconnect_preset.go new file mode 100644 index 000000000..b5ae47697 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/settings/v1alpha1/types_openidconnect_preset.go @@ -0,0 +1,44 @@ +// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// OpenIDConnectPreset is a OpenID Connect configuration that is applied +// to a Shoot in a namespace. +type OpenIDConnectPreset struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata. + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Spec is the specification of this OpenIDConnect preset. + Spec OpenIDConnectPresetSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// OpenIDConnectPresetList is a collection of OpenIDConnectPresets. +type OpenIDConnectPresetList struct { + metav1.TypeMeta `json:",inline"` + // Standard list object metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Items is the list of OpenIDConnectPresets. + Items []OpenIDConnectPreset `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/settings/v1alpha1/types_shared.go b/vendor/github.com/gardener/gardener/pkg/apis/settings/v1alpha1/types_shared.go new file mode 100644 index 000000000..01b4e840e --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/settings/v1alpha1/types_shared.go @@ -0,0 +1,105 @@ +// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + // DefaultUsernameClaim is the default username claim. 
+ DefaultUsernameClaim = "sub" + // DefaultSignAlg is the default signing algorithm. + DefaultSignAlg = "RS256" +) + +// OpenIDConnectPresetSpec contains the Shoot selector for which +// a specific OpenID Connect configuration is applied. +type OpenIDConnectPresetSpec struct { + + // Server contains the kube-apiserver's OpenID Connect configuration. + // This configuration is not overwritting any existing OpenID Connect + // configuration already set on the Shoot object. + Server KubeAPIServerOpenIDConnect `json:"server" protobuf:"bytes,1,opt,name=server"` + + // Client contains the configuration used for client OIDC authentication + // of Shoot clusters. + // This configuration is not overwritting any existing OpenID Connect + // client authentication already set on the Shoot object. + // +optional + Client *OpenIDConnectClientAuthentication `json:"client,omitempty" protobuf:"bytes,2,opt,name=client"` + + // ShootSelector decides whether to apply the configuration if the + // Shoot has matching labels. + // Use the selector only if the OIDC Preset is opt-in, because end + // users may skip the admission by setting the labels. + // Default to the empty LabelSelector, which matches everything. + // +optional + ShootSelector *metav1.LabelSelector `json:"shootSelector,omitempty" protobuf:"bytes,3,opt,name=shootSelector"` + + // Weight associated with matching the corresponding preset, + // in the range 1-100. + // Required. + Weight int32 `json:"weight" protobuf:"varint,4,opt,name=weight"` +} + +// This is copied to not depend on the gardener package and keep it version agnostic. + +// KubeAPIServerOpenIDConnect contains configuration settings for the OIDC provider. +// Note: Descriptions were taken from the Kubernetes documentation. +type KubeAPIServerOpenIDConnect struct { + // If set, the OpenID server's certificate will be verified by one of the authorities in the oidc-ca-file, otherwise the host's root CA set will be used. + // +optional + CABundle *string `json:"caBundle,omitempty" protobuf:"bytes,1,opt,name=caBundle"` + // The client ID for the OpenID Connect client. + // Required. + ClientID string `json:"clientID" protobuf:"bytes,2,opt,name=clientID"` + // If provided, the name of a custom OpenID Connect claim for specifying user groups. The claim value is expected to be a string or array of strings. This field is experimental, please see the authentication documentation for further details. + // +optional + GroupsClaim *string `json:"groupsClaim,omitempty" protobuf:"bytes,3,opt,name=groupsClaim"` + // If provided, all groups will be prefixed with this value to prevent conflicts with other authentication strategies. + // +optional + GroupsPrefix *string `json:"groupsPrefix,omitempty" protobuf:"bytes,4,opt,name=groupsPrefix"` + // The URL of the OpenID issuer, only HTTPS scheme will be accepted. If set, it will be used to verify the OIDC JSON Web Token (JWT). + // Required. + IssuerURL string `json:"issuerURL" protobuf:"bytes,5,opt,name=issuerURL"` + // key=value pairs that describes a required claim in the ID Token. If set, the claim is verified to be present in the ID Token with a matching value. + // Only applied when the Kubernetes version of the Shoot is >= 1.11 + // +optional + RequiredClaims map[string]string `json:"requiredClaims,omitempty" protobuf:"bytes,6,rep,name=requiredClaims"` + // List of allowed JOSE asymmetric signing algorithms. JWTs with a 'alg' header value not in this list will be rejected. 
Values are defined by RFC 7518 https://tools.ietf.org/html/rfc7518#section-3.1 + // Defaults to [RS256] + // +optional + SigningAlgs []string `json:"signingAlgs,omitempty" protobuf:"bytes,7,rep,name=signingAlgs"` + // The OpenID claim to use as the user name. Note that claims other than the default ('sub') is not guaranteed to be unique and immutable. This field is experimental, please see the authentication documentation for further details. + // Defaults to "sub". + // +optional + UsernameClaim *string `json:"usernameClaim,omitempty" protobuf:"bytes,8,opt,name=usernameClaim"` + // If provided, all usernames will be prefixed with this value. If not provided, username claims other than 'email' are prefixed by the issuer URL to avoid clashes. To skip any prefixing, provide the value '-'. + // +optional + UsernamePrefix *string `json:"usernamePrefix,omitempty" protobuf:"bytes,9,opt,name=usernamePrefix"` +} + +// OpenIDConnectClientAuthentication contains configuration for OIDC clients. +type OpenIDConnectClientAuthentication struct { + // The client Secret for the OpenID Connect client. + // +optional + Secret *string `json:"secret,omitempty" protobuf:"bytes,1,opt,name=secret"` + + // Extra configuration added to kubeconfig's auth-provider. + // Must not be any of idp-issuer-url, client-id, client-secret, idp-certificate-authority, idp-certificate-authority-data, id-token or refresh-token + // +optional + ExtraConfig map[string]string `json:"extraConfig,omitempty" protobuf:"bytes,2,rep,name=extraConfig"` +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/settings/v1alpha1/zz_generated.conversion.go b/vendor/github.com/gardener/gardener/pkg/apis/settings/v1alpha1/zz_generated.conversion.go new file mode 100644 index 000000000..453f3effe --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/settings/v1alpha1/zz_generated.conversion.go @@ -0,0 +1,330 @@ +// +build !ignore_autogenerated + +/* +Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by conversion-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + unsafe "unsafe" + + settings "github.com/gardener/gardener/pkg/apis/settings" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. 
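Editorial aside, not part of the vendored files: the settings.gardener.cloud/v1alpha1 API added above (OpenIDConnectPreset, its shared types, and the register.go scheme builder) is typically consumed roughly as in the following minimal Go sketch. All concrete values here (object name, namespace, issuer URL, labels, weight) are illustrative assumptions and do not come from this patch.

package main

import (
	"fmt"

	settingsv1alpha1 "github.com/gardener/gardener/pkg/apis/settings/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	// Register the settings.gardener.cloud/v1alpha1 kinds with a scheme,
	// as a client built on this vendored package would do.
	scheme := runtime.NewScheme()
	if err := settingsv1alpha1.AddToScheme(scheme); err != nil {
		panic(err)
	}

	// A namespaced preset that injects OIDC settings into Shoots whose
	// labels match the selector; all values are placeholders.
	preset := &settingsv1alpha1.OpenIDConnectPreset{
		ObjectMeta: metav1.ObjectMeta{Name: "example-preset", Namespace: "garden-dev"},
		Spec: settingsv1alpha1.OpenIDConnectPresetSpec{
			Server: settingsv1alpha1.KubeAPIServerOpenIDConnect{
				ClientID:  "kube-apiserver",
				IssuerURL: "https://issuer.example.com",
			},
			ShootSelector: &metav1.LabelSelector{
				MatchLabels: map[string]string{"oidc": "enabled"},
			},
			Weight: 50,
		},
	}
	fmt.Println(preset.Name, preset.Spec.Server.IssuerURL)
}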
+func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*ClusterOpenIDConnectPreset)(nil), (*settings.ClusterOpenIDConnectPreset)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_ClusterOpenIDConnectPreset_To_settings_ClusterOpenIDConnectPreset(a.(*ClusterOpenIDConnectPreset), b.(*settings.ClusterOpenIDConnectPreset), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*settings.ClusterOpenIDConnectPreset)(nil), (*ClusterOpenIDConnectPreset)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_settings_ClusterOpenIDConnectPreset_To_v1alpha1_ClusterOpenIDConnectPreset(a.(*settings.ClusterOpenIDConnectPreset), b.(*ClusterOpenIDConnectPreset), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ClusterOpenIDConnectPresetList)(nil), (*settings.ClusterOpenIDConnectPresetList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_ClusterOpenIDConnectPresetList_To_settings_ClusterOpenIDConnectPresetList(a.(*ClusterOpenIDConnectPresetList), b.(*settings.ClusterOpenIDConnectPresetList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*settings.ClusterOpenIDConnectPresetList)(nil), (*ClusterOpenIDConnectPresetList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_settings_ClusterOpenIDConnectPresetList_To_v1alpha1_ClusterOpenIDConnectPresetList(a.(*settings.ClusterOpenIDConnectPresetList), b.(*ClusterOpenIDConnectPresetList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ClusterOpenIDConnectPresetSpec)(nil), (*settings.ClusterOpenIDConnectPresetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_ClusterOpenIDConnectPresetSpec_To_settings_ClusterOpenIDConnectPresetSpec(a.(*ClusterOpenIDConnectPresetSpec), b.(*settings.ClusterOpenIDConnectPresetSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*settings.ClusterOpenIDConnectPresetSpec)(nil), (*ClusterOpenIDConnectPresetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_settings_ClusterOpenIDConnectPresetSpec_To_v1alpha1_ClusterOpenIDConnectPresetSpec(a.(*settings.ClusterOpenIDConnectPresetSpec), b.(*ClusterOpenIDConnectPresetSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*KubeAPIServerOpenIDConnect)(nil), (*settings.KubeAPIServerOpenIDConnect)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_KubeAPIServerOpenIDConnect_To_settings_KubeAPIServerOpenIDConnect(a.(*KubeAPIServerOpenIDConnect), b.(*settings.KubeAPIServerOpenIDConnect), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*settings.KubeAPIServerOpenIDConnect)(nil), (*KubeAPIServerOpenIDConnect)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_settings_KubeAPIServerOpenIDConnect_To_v1alpha1_KubeAPIServerOpenIDConnect(a.(*settings.KubeAPIServerOpenIDConnect), b.(*KubeAPIServerOpenIDConnect), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*OpenIDConnectClientAuthentication)(nil), (*settings.OpenIDConnectClientAuthentication)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_v1alpha1_OpenIDConnectClientAuthentication_To_settings_OpenIDConnectClientAuthentication(a.(*OpenIDConnectClientAuthentication), b.(*settings.OpenIDConnectClientAuthentication), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*settings.OpenIDConnectClientAuthentication)(nil), (*OpenIDConnectClientAuthentication)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_settings_OpenIDConnectClientAuthentication_To_v1alpha1_OpenIDConnectClientAuthentication(a.(*settings.OpenIDConnectClientAuthentication), b.(*OpenIDConnectClientAuthentication), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*OpenIDConnectPreset)(nil), (*settings.OpenIDConnectPreset)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_OpenIDConnectPreset_To_settings_OpenIDConnectPreset(a.(*OpenIDConnectPreset), b.(*settings.OpenIDConnectPreset), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*settings.OpenIDConnectPreset)(nil), (*OpenIDConnectPreset)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_settings_OpenIDConnectPreset_To_v1alpha1_OpenIDConnectPreset(a.(*settings.OpenIDConnectPreset), b.(*OpenIDConnectPreset), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*OpenIDConnectPresetList)(nil), (*settings.OpenIDConnectPresetList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_OpenIDConnectPresetList_To_settings_OpenIDConnectPresetList(a.(*OpenIDConnectPresetList), b.(*settings.OpenIDConnectPresetList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*settings.OpenIDConnectPresetList)(nil), (*OpenIDConnectPresetList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_settings_OpenIDConnectPresetList_To_v1alpha1_OpenIDConnectPresetList(a.(*settings.OpenIDConnectPresetList), b.(*OpenIDConnectPresetList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*OpenIDConnectPresetSpec)(nil), (*settings.OpenIDConnectPresetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_OpenIDConnectPresetSpec_To_settings_OpenIDConnectPresetSpec(a.(*OpenIDConnectPresetSpec), b.(*settings.OpenIDConnectPresetSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*settings.OpenIDConnectPresetSpec)(nil), (*OpenIDConnectPresetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_settings_OpenIDConnectPresetSpec_To_v1alpha1_OpenIDConnectPresetSpec(a.(*settings.OpenIDConnectPresetSpec), b.(*OpenIDConnectPresetSpec), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_v1alpha1_ClusterOpenIDConnectPreset_To_settings_ClusterOpenIDConnectPreset(in *ClusterOpenIDConnectPreset, out *settings.ClusterOpenIDConnectPreset, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha1_ClusterOpenIDConnectPresetSpec_To_settings_ClusterOpenIDConnectPresetSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha1_ClusterOpenIDConnectPreset_To_settings_ClusterOpenIDConnectPreset is an autogenerated conversion function. 
+func Convert_v1alpha1_ClusterOpenIDConnectPreset_To_settings_ClusterOpenIDConnectPreset(in *ClusterOpenIDConnectPreset, out *settings.ClusterOpenIDConnectPreset, s conversion.Scope) error { + return autoConvert_v1alpha1_ClusterOpenIDConnectPreset_To_settings_ClusterOpenIDConnectPreset(in, out, s) +} + +func autoConvert_settings_ClusterOpenIDConnectPreset_To_v1alpha1_ClusterOpenIDConnectPreset(in *settings.ClusterOpenIDConnectPreset, out *ClusterOpenIDConnectPreset, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_settings_ClusterOpenIDConnectPresetSpec_To_v1alpha1_ClusterOpenIDConnectPresetSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +// Convert_settings_ClusterOpenIDConnectPreset_To_v1alpha1_ClusterOpenIDConnectPreset is an autogenerated conversion function. +func Convert_settings_ClusterOpenIDConnectPreset_To_v1alpha1_ClusterOpenIDConnectPreset(in *settings.ClusterOpenIDConnectPreset, out *ClusterOpenIDConnectPreset, s conversion.Scope) error { + return autoConvert_settings_ClusterOpenIDConnectPreset_To_v1alpha1_ClusterOpenIDConnectPreset(in, out, s) +} + +func autoConvert_v1alpha1_ClusterOpenIDConnectPresetList_To_settings_ClusterOpenIDConnectPresetList(in *ClusterOpenIDConnectPresetList, out *settings.ClusterOpenIDConnectPresetList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]settings.ClusterOpenIDConnectPreset)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha1_ClusterOpenIDConnectPresetList_To_settings_ClusterOpenIDConnectPresetList is an autogenerated conversion function. +func Convert_v1alpha1_ClusterOpenIDConnectPresetList_To_settings_ClusterOpenIDConnectPresetList(in *ClusterOpenIDConnectPresetList, out *settings.ClusterOpenIDConnectPresetList, s conversion.Scope) error { + return autoConvert_v1alpha1_ClusterOpenIDConnectPresetList_To_settings_ClusterOpenIDConnectPresetList(in, out, s) +} + +func autoConvert_settings_ClusterOpenIDConnectPresetList_To_v1alpha1_ClusterOpenIDConnectPresetList(in *settings.ClusterOpenIDConnectPresetList, out *ClusterOpenIDConnectPresetList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]ClusterOpenIDConnectPreset)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_settings_ClusterOpenIDConnectPresetList_To_v1alpha1_ClusterOpenIDConnectPresetList is an autogenerated conversion function. +func Convert_settings_ClusterOpenIDConnectPresetList_To_v1alpha1_ClusterOpenIDConnectPresetList(in *settings.ClusterOpenIDConnectPresetList, out *ClusterOpenIDConnectPresetList, s conversion.Scope) error { + return autoConvert_settings_ClusterOpenIDConnectPresetList_To_v1alpha1_ClusterOpenIDConnectPresetList(in, out, s) +} + +func autoConvert_v1alpha1_ClusterOpenIDConnectPresetSpec_To_settings_ClusterOpenIDConnectPresetSpec(in *ClusterOpenIDConnectPresetSpec, out *settings.ClusterOpenIDConnectPresetSpec, s conversion.Scope) error { + if err := Convert_v1alpha1_OpenIDConnectPresetSpec_To_settings_OpenIDConnectPresetSpec(&in.OpenIDConnectPresetSpec, &out.OpenIDConnectPresetSpec, s); err != nil { + return err + } + out.ProjectSelector = (*v1.LabelSelector)(unsafe.Pointer(in.ProjectSelector)) + return nil +} + +// Convert_v1alpha1_ClusterOpenIDConnectPresetSpec_To_settings_ClusterOpenIDConnectPresetSpec is an autogenerated conversion function. 
+func Convert_v1alpha1_ClusterOpenIDConnectPresetSpec_To_settings_ClusterOpenIDConnectPresetSpec(in *ClusterOpenIDConnectPresetSpec, out *settings.ClusterOpenIDConnectPresetSpec, s conversion.Scope) error { + return autoConvert_v1alpha1_ClusterOpenIDConnectPresetSpec_To_settings_ClusterOpenIDConnectPresetSpec(in, out, s) +} + +func autoConvert_settings_ClusterOpenIDConnectPresetSpec_To_v1alpha1_ClusterOpenIDConnectPresetSpec(in *settings.ClusterOpenIDConnectPresetSpec, out *ClusterOpenIDConnectPresetSpec, s conversion.Scope) error { + if err := Convert_settings_OpenIDConnectPresetSpec_To_v1alpha1_OpenIDConnectPresetSpec(&in.OpenIDConnectPresetSpec, &out.OpenIDConnectPresetSpec, s); err != nil { + return err + } + out.ProjectSelector = (*v1.LabelSelector)(unsafe.Pointer(in.ProjectSelector)) + return nil +} + +// Convert_settings_ClusterOpenIDConnectPresetSpec_To_v1alpha1_ClusterOpenIDConnectPresetSpec is an autogenerated conversion function. +func Convert_settings_ClusterOpenIDConnectPresetSpec_To_v1alpha1_ClusterOpenIDConnectPresetSpec(in *settings.ClusterOpenIDConnectPresetSpec, out *ClusterOpenIDConnectPresetSpec, s conversion.Scope) error { + return autoConvert_settings_ClusterOpenIDConnectPresetSpec_To_v1alpha1_ClusterOpenIDConnectPresetSpec(in, out, s) +} + +func autoConvert_v1alpha1_KubeAPIServerOpenIDConnect_To_settings_KubeAPIServerOpenIDConnect(in *KubeAPIServerOpenIDConnect, out *settings.KubeAPIServerOpenIDConnect, s conversion.Scope) error { + out.CABundle = (*string)(unsafe.Pointer(in.CABundle)) + out.ClientID = in.ClientID + out.GroupsClaim = (*string)(unsafe.Pointer(in.GroupsClaim)) + out.GroupsPrefix = (*string)(unsafe.Pointer(in.GroupsPrefix)) + out.IssuerURL = in.IssuerURL + out.RequiredClaims = *(*map[string]string)(unsafe.Pointer(&in.RequiredClaims)) + out.SigningAlgs = *(*[]string)(unsafe.Pointer(&in.SigningAlgs)) + out.UsernameClaim = (*string)(unsafe.Pointer(in.UsernameClaim)) + out.UsernamePrefix = (*string)(unsafe.Pointer(in.UsernamePrefix)) + return nil +} + +// Convert_v1alpha1_KubeAPIServerOpenIDConnect_To_settings_KubeAPIServerOpenIDConnect is an autogenerated conversion function. +func Convert_v1alpha1_KubeAPIServerOpenIDConnect_To_settings_KubeAPIServerOpenIDConnect(in *KubeAPIServerOpenIDConnect, out *settings.KubeAPIServerOpenIDConnect, s conversion.Scope) error { + return autoConvert_v1alpha1_KubeAPIServerOpenIDConnect_To_settings_KubeAPIServerOpenIDConnect(in, out, s) +} + +func autoConvert_settings_KubeAPIServerOpenIDConnect_To_v1alpha1_KubeAPIServerOpenIDConnect(in *settings.KubeAPIServerOpenIDConnect, out *KubeAPIServerOpenIDConnect, s conversion.Scope) error { + out.CABundle = (*string)(unsafe.Pointer(in.CABundle)) + out.ClientID = in.ClientID + out.GroupsClaim = (*string)(unsafe.Pointer(in.GroupsClaim)) + out.GroupsPrefix = (*string)(unsafe.Pointer(in.GroupsPrefix)) + out.IssuerURL = in.IssuerURL + out.RequiredClaims = *(*map[string]string)(unsafe.Pointer(&in.RequiredClaims)) + out.SigningAlgs = *(*[]string)(unsafe.Pointer(&in.SigningAlgs)) + out.UsernameClaim = (*string)(unsafe.Pointer(in.UsernameClaim)) + out.UsernamePrefix = (*string)(unsafe.Pointer(in.UsernamePrefix)) + return nil +} + +// Convert_settings_KubeAPIServerOpenIDConnect_To_v1alpha1_KubeAPIServerOpenIDConnect is an autogenerated conversion function. 
+func Convert_settings_KubeAPIServerOpenIDConnect_To_v1alpha1_KubeAPIServerOpenIDConnect(in *settings.KubeAPIServerOpenIDConnect, out *KubeAPIServerOpenIDConnect, s conversion.Scope) error { + return autoConvert_settings_KubeAPIServerOpenIDConnect_To_v1alpha1_KubeAPIServerOpenIDConnect(in, out, s) +} + +func autoConvert_v1alpha1_OpenIDConnectClientAuthentication_To_settings_OpenIDConnectClientAuthentication(in *OpenIDConnectClientAuthentication, out *settings.OpenIDConnectClientAuthentication, s conversion.Scope) error { + out.Secret = (*string)(unsafe.Pointer(in.Secret)) + out.ExtraConfig = *(*map[string]string)(unsafe.Pointer(&in.ExtraConfig)) + return nil +} + +// Convert_v1alpha1_OpenIDConnectClientAuthentication_To_settings_OpenIDConnectClientAuthentication is an autogenerated conversion function. +func Convert_v1alpha1_OpenIDConnectClientAuthentication_To_settings_OpenIDConnectClientAuthentication(in *OpenIDConnectClientAuthentication, out *settings.OpenIDConnectClientAuthentication, s conversion.Scope) error { + return autoConvert_v1alpha1_OpenIDConnectClientAuthentication_To_settings_OpenIDConnectClientAuthentication(in, out, s) +} + +func autoConvert_settings_OpenIDConnectClientAuthentication_To_v1alpha1_OpenIDConnectClientAuthentication(in *settings.OpenIDConnectClientAuthentication, out *OpenIDConnectClientAuthentication, s conversion.Scope) error { + out.Secret = (*string)(unsafe.Pointer(in.Secret)) + out.ExtraConfig = *(*map[string]string)(unsafe.Pointer(&in.ExtraConfig)) + return nil +} + +// Convert_settings_OpenIDConnectClientAuthentication_To_v1alpha1_OpenIDConnectClientAuthentication is an autogenerated conversion function. +func Convert_settings_OpenIDConnectClientAuthentication_To_v1alpha1_OpenIDConnectClientAuthentication(in *settings.OpenIDConnectClientAuthentication, out *OpenIDConnectClientAuthentication, s conversion.Scope) error { + return autoConvert_settings_OpenIDConnectClientAuthentication_To_v1alpha1_OpenIDConnectClientAuthentication(in, out, s) +} + +func autoConvert_v1alpha1_OpenIDConnectPreset_To_settings_OpenIDConnectPreset(in *OpenIDConnectPreset, out *settings.OpenIDConnectPreset, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha1_OpenIDConnectPresetSpec_To_settings_OpenIDConnectPresetSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha1_OpenIDConnectPreset_To_settings_OpenIDConnectPreset is an autogenerated conversion function. +func Convert_v1alpha1_OpenIDConnectPreset_To_settings_OpenIDConnectPreset(in *OpenIDConnectPreset, out *settings.OpenIDConnectPreset, s conversion.Scope) error { + return autoConvert_v1alpha1_OpenIDConnectPreset_To_settings_OpenIDConnectPreset(in, out, s) +} + +func autoConvert_settings_OpenIDConnectPreset_To_v1alpha1_OpenIDConnectPreset(in *settings.OpenIDConnectPreset, out *OpenIDConnectPreset, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_settings_OpenIDConnectPresetSpec_To_v1alpha1_OpenIDConnectPresetSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +// Convert_settings_OpenIDConnectPreset_To_v1alpha1_OpenIDConnectPreset is an autogenerated conversion function. 
+func Convert_settings_OpenIDConnectPreset_To_v1alpha1_OpenIDConnectPreset(in *settings.OpenIDConnectPreset, out *OpenIDConnectPreset, s conversion.Scope) error { + return autoConvert_settings_OpenIDConnectPreset_To_v1alpha1_OpenIDConnectPreset(in, out, s) +} + +func autoConvert_v1alpha1_OpenIDConnectPresetList_To_settings_OpenIDConnectPresetList(in *OpenIDConnectPresetList, out *settings.OpenIDConnectPresetList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]settings.OpenIDConnectPreset)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha1_OpenIDConnectPresetList_To_settings_OpenIDConnectPresetList is an autogenerated conversion function. +func Convert_v1alpha1_OpenIDConnectPresetList_To_settings_OpenIDConnectPresetList(in *OpenIDConnectPresetList, out *settings.OpenIDConnectPresetList, s conversion.Scope) error { + return autoConvert_v1alpha1_OpenIDConnectPresetList_To_settings_OpenIDConnectPresetList(in, out, s) +} + +func autoConvert_settings_OpenIDConnectPresetList_To_v1alpha1_OpenIDConnectPresetList(in *settings.OpenIDConnectPresetList, out *OpenIDConnectPresetList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]OpenIDConnectPreset)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_settings_OpenIDConnectPresetList_To_v1alpha1_OpenIDConnectPresetList is an autogenerated conversion function. +func Convert_settings_OpenIDConnectPresetList_To_v1alpha1_OpenIDConnectPresetList(in *settings.OpenIDConnectPresetList, out *OpenIDConnectPresetList, s conversion.Scope) error { + return autoConvert_settings_OpenIDConnectPresetList_To_v1alpha1_OpenIDConnectPresetList(in, out, s) +} + +func autoConvert_v1alpha1_OpenIDConnectPresetSpec_To_settings_OpenIDConnectPresetSpec(in *OpenIDConnectPresetSpec, out *settings.OpenIDConnectPresetSpec, s conversion.Scope) error { + if err := Convert_v1alpha1_KubeAPIServerOpenIDConnect_To_settings_KubeAPIServerOpenIDConnect(&in.Server, &out.Server, s); err != nil { + return err + } + out.Client = (*settings.OpenIDConnectClientAuthentication)(unsafe.Pointer(in.Client)) + out.ShootSelector = (*v1.LabelSelector)(unsafe.Pointer(in.ShootSelector)) + out.Weight = in.Weight + return nil +} + +// Convert_v1alpha1_OpenIDConnectPresetSpec_To_settings_OpenIDConnectPresetSpec is an autogenerated conversion function. +func Convert_v1alpha1_OpenIDConnectPresetSpec_To_settings_OpenIDConnectPresetSpec(in *OpenIDConnectPresetSpec, out *settings.OpenIDConnectPresetSpec, s conversion.Scope) error { + return autoConvert_v1alpha1_OpenIDConnectPresetSpec_To_settings_OpenIDConnectPresetSpec(in, out, s) +} + +func autoConvert_settings_OpenIDConnectPresetSpec_To_v1alpha1_OpenIDConnectPresetSpec(in *settings.OpenIDConnectPresetSpec, out *OpenIDConnectPresetSpec, s conversion.Scope) error { + if err := Convert_settings_KubeAPIServerOpenIDConnect_To_v1alpha1_KubeAPIServerOpenIDConnect(&in.Server, &out.Server, s); err != nil { + return err + } + out.Client = (*OpenIDConnectClientAuthentication)(unsafe.Pointer(in.Client)) + out.ShootSelector = (*v1.LabelSelector)(unsafe.Pointer(in.ShootSelector)) + out.Weight = in.Weight + return nil +} + +// Convert_settings_OpenIDConnectPresetSpec_To_v1alpha1_OpenIDConnectPresetSpec is an autogenerated conversion function. 
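Editorial aside, not part of the vendored file: the generated list conversions above avoid per-element copying by reinterpreting a pointer to one slice type as a pointer to the other via unsafe.Pointer, which is sound only because the external v1alpha1 structs and the internal settings structs share an identical memory layout. A self-contained sketch of that pattern, using hypothetical item types in place of the real API structs:

package main

import (
	"fmt"
	"unsafe"
)

// Two distinct named types with an identical layout, standing in for the
// v1alpha1 and internal API structs converted by the generated code above.
type externalItem struct{ Name string }
type internalItem struct{ Name string }

func main() {
	in := []externalItem{{Name: "a"}, {Name: "b"}}
	// Same trick as the generated converters: reinterpret the slice header
	// instead of copying element by element.
	out := *(*[]internalItem)(unsafe.Pointer(&in))
	fmt.Println(out[0].Name, out[1].Name) // a b
}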
+func Convert_settings_OpenIDConnectPresetSpec_To_v1alpha1_OpenIDConnectPresetSpec(in *settings.OpenIDConnectPresetSpec, out *OpenIDConnectPresetSpec, s conversion.Scope) error { + return autoConvert_settings_OpenIDConnectPresetSpec_To_v1alpha1_OpenIDConnectPresetSpec(in, out, s) +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/settings/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/gardener/gardener/pkg/apis/settings/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000..39a9acb65 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/settings/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,276 @@ +// +build !ignore_autogenerated + +/* +Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterOpenIDConnectPreset) DeepCopyInto(out *ClusterOpenIDConnectPreset) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOpenIDConnectPreset. +func (in *ClusterOpenIDConnectPreset) DeepCopy() *ClusterOpenIDConnectPreset { + if in == nil { + return nil + } + out := new(ClusterOpenIDConnectPreset) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterOpenIDConnectPreset) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterOpenIDConnectPresetList) DeepCopyInto(out *ClusterOpenIDConnectPresetList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterOpenIDConnectPreset, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOpenIDConnectPresetList. +func (in *ClusterOpenIDConnectPresetList) DeepCopy() *ClusterOpenIDConnectPresetList { + if in == nil { + return nil + } + out := new(ClusterOpenIDConnectPresetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ClusterOpenIDConnectPresetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterOpenIDConnectPresetSpec) DeepCopyInto(out *ClusterOpenIDConnectPresetSpec) { + *out = *in + in.OpenIDConnectPresetSpec.DeepCopyInto(&out.OpenIDConnectPresetSpec) + if in.ProjectSelector != nil { + in, out := &in.ProjectSelector, &out.ProjectSelector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOpenIDConnectPresetSpec. +func (in *ClusterOpenIDConnectPresetSpec) DeepCopy() *ClusterOpenIDConnectPresetSpec { + if in == nil { + return nil + } + out := new(ClusterOpenIDConnectPresetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeAPIServerOpenIDConnect) DeepCopyInto(out *KubeAPIServerOpenIDConnect) { + *out = *in + if in.CABundle != nil { + in, out := &in.CABundle, &out.CABundle + *out = new(string) + **out = **in + } + if in.GroupsClaim != nil { + in, out := &in.GroupsClaim, &out.GroupsClaim + *out = new(string) + **out = **in + } + if in.GroupsPrefix != nil { + in, out := &in.GroupsPrefix, &out.GroupsPrefix + *out = new(string) + **out = **in + } + if in.RequiredClaims != nil { + in, out := &in.RequiredClaims, &out.RequiredClaims + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.SigningAlgs != nil { + in, out := &in.SigningAlgs, &out.SigningAlgs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.UsernameClaim != nil { + in, out := &in.UsernameClaim, &out.UsernameClaim + *out = new(string) + **out = **in + } + if in.UsernamePrefix != nil { + in, out := &in.UsernamePrefix, &out.UsernamePrefix + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeAPIServerOpenIDConnect. +func (in *KubeAPIServerOpenIDConnect) DeepCopy() *KubeAPIServerOpenIDConnect { + if in == nil { + return nil + } + out := new(KubeAPIServerOpenIDConnect) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenIDConnectClientAuthentication) DeepCopyInto(out *OpenIDConnectClientAuthentication) { + *out = *in + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = new(string) + **out = **in + } + if in.ExtraConfig != nil { + in, out := &in.ExtraConfig, &out.ExtraConfig + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenIDConnectClientAuthentication. +func (in *OpenIDConnectClientAuthentication) DeepCopy() *OpenIDConnectClientAuthentication { + if in == nil { + return nil + } + out := new(OpenIDConnectClientAuthentication) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OpenIDConnectPreset) DeepCopyInto(out *OpenIDConnectPreset) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenIDConnectPreset. +func (in *OpenIDConnectPreset) DeepCopy() *OpenIDConnectPreset { + if in == nil { + return nil + } + out := new(OpenIDConnectPreset) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OpenIDConnectPreset) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenIDConnectPresetList) DeepCopyInto(out *OpenIDConnectPresetList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OpenIDConnectPreset, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenIDConnectPresetList. +func (in *OpenIDConnectPresetList) DeepCopy() *OpenIDConnectPresetList { + if in == nil { + return nil + } + out := new(OpenIDConnectPresetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OpenIDConnectPresetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenIDConnectPresetSpec) DeepCopyInto(out *OpenIDConnectPresetSpec) { + *out = *in + in.Server.DeepCopyInto(&out.Server) + if in.Client != nil { + in, out := &in.Client, &out.Client + *out = new(OpenIDConnectClientAuthentication) + (*in).DeepCopyInto(*out) + } + if in.ShootSelector != nil { + in, out := &in.ShootSelector, &out.ShootSelector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenIDConnectPresetSpec. +func (in *OpenIDConnectPresetSpec) DeepCopy() *OpenIDConnectPresetSpec { + if in == nil { + return nil + } + out := new(OpenIDConnectPresetSpec) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/settings/v1alpha1/zz_generated.defaults.go b/vendor/github.com/gardener/gardener/pkg/apis/settings/v1alpha1/zz_generated.defaults.go new file mode 100644 index 000000000..9711fbf95 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/settings/v1alpha1/zz_generated.defaults.go @@ -0,0 +1,60 @@ +// +build !ignore_autogenerated + +/* +Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by defaulter-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + scheme.AddTypeDefaultingFunc(&ClusterOpenIDConnectPreset{}, func(obj interface{}) { SetObjectDefaults_ClusterOpenIDConnectPreset(obj.(*ClusterOpenIDConnectPreset)) }) + scheme.AddTypeDefaultingFunc(&ClusterOpenIDConnectPresetList{}, func(obj interface{}) { + SetObjectDefaults_ClusterOpenIDConnectPresetList(obj.(*ClusterOpenIDConnectPresetList)) + }) + scheme.AddTypeDefaultingFunc(&OpenIDConnectPreset{}, func(obj interface{}) { SetObjectDefaults_OpenIDConnectPreset(obj.(*OpenIDConnectPreset)) }) + scheme.AddTypeDefaultingFunc(&OpenIDConnectPresetList{}, func(obj interface{}) { SetObjectDefaults_OpenIDConnectPresetList(obj.(*OpenIDConnectPresetList)) }) + return nil +} + +func SetObjectDefaults_ClusterOpenIDConnectPreset(in *ClusterOpenIDConnectPreset) { + SetDefaults_ClusterOpenIDConnectPreset(in) +} + +func SetObjectDefaults_ClusterOpenIDConnectPresetList(in *ClusterOpenIDConnectPresetList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_ClusterOpenIDConnectPreset(a) + } +} + +func SetObjectDefaults_OpenIDConnectPreset(in *OpenIDConnectPreset) { + SetDefaults_OpenIDConnectPreset(in) +} + +func SetObjectDefaults_OpenIDConnectPresetList(in *OpenIDConnectPresetList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_OpenIDConnectPreset(a) + } +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/settings/zz_generated.deepcopy.go b/vendor/github.com/gardener/gardener/pkg/apis/settings/zz_generated.deepcopy.go new file mode 100644 index 000000000..f79064cc9 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/settings/zz_generated.deepcopy.go @@ -0,0 +1,276 @@ +// +build !ignore_autogenerated + +/* +Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package settings + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterOpenIDConnectPreset) DeepCopyInto(out *ClusterOpenIDConnectPreset) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOpenIDConnectPreset. +func (in *ClusterOpenIDConnectPreset) DeepCopy() *ClusterOpenIDConnectPreset { + if in == nil { + return nil + } + out := new(ClusterOpenIDConnectPreset) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterOpenIDConnectPreset) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterOpenIDConnectPresetList) DeepCopyInto(out *ClusterOpenIDConnectPresetList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterOpenIDConnectPreset, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOpenIDConnectPresetList. +func (in *ClusterOpenIDConnectPresetList) DeepCopy() *ClusterOpenIDConnectPresetList { + if in == nil { + return nil + } + out := new(ClusterOpenIDConnectPresetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterOpenIDConnectPresetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterOpenIDConnectPresetSpec) DeepCopyInto(out *ClusterOpenIDConnectPresetSpec) { + *out = *in + in.OpenIDConnectPresetSpec.DeepCopyInto(&out.OpenIDConnectPresetSpec) + if in.ProjectSelector != nil { + in, out := &in.ProjectSelector, &out.ProjectSelector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOpenIDConnectPresetSpec. +func (in *ClusterOpenIDConnectPresetSpec) DeepCopy() *ClusterOpenIDConnectPresetSpec { + if in == nil { + return nil + } + out := new(ClusterOpenIDConnectPresetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KubeAPIServerOpenIDConnect) DeepCopyInto(out *KubeAPIServerOpenIDConnect) { + *out = *in + if in.CABundle != nil { + in, out := &in.CABundle, &out.CABundle + *out = new(string) + **out = **in + } + if in.GroupsClaim != nil { + in, out := &in.GroupsClaim, &out.GroupsClaim + *out = new(string) + **out = **in + } + if in.GroupsPrefix != nil { + in, out := &in.GroupsPrefix, &out.GroupsPrefix + *out = new(string) + **out = **in + } + if in.RequiredClaims != nil { + in, out := &in.RequiredClaims, &out.RequiredClaims + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.SigningAlgs != nil { + in, out := &in.SigningAlgs, &out.SigningAlgs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.UsernameClaim != nil { + in, out := &in.UsernameClaim, &out.UsernameClaim + *out = new(string) + **out = **in + } + if in.UsernamePrefix != nil { + in, out := &in.UsernamePrefix, &out.UsernamePrefix + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeAPIServerOpenIDConnect. +func (in *KubeAPIServerOpenIDConnect) DeepCopy() *KubeAPIServerOpenIDConnect { + if in == nil { + return nil + } + out := new(KubeAPIServerOpenIDConnect) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenIDConnectClientAuthentication) DeepCopyInto(out *OpenIDConnectClientAuthentication) { + *out = *in + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = new(string) + **out = **in + } + if in.ExtraConfig != nil { + in, out := &in.ExtraConfig, &out.ExtraConfig + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenIDConnectClientAuthentication. +func (in *OpenIDConnectClientAuthentication) DeepCopy() *OpenIDConnectClientAuthentication { + if in == nil { + return nil + } + out := new(OpenIDConnectClientAuthentication) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenIDConnectPreset) DeepCopyInto(out *OpenIDConnectPreset) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenIDConnectPreset. +func (in *OpenIDConnectPreset) DeepCopy() *OpenIDConnectPreset { + if in == nil { + return nil + } + out := new(OpenIDConnectPreset) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OpenIDConnectPreset) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OpenIDConnectPresetList) DeepCopyInto(out *OpenIDConnectPresetList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OpenIDConnectPreset, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenIDConnectPresetList. +func (in *OpenIDConnectPresetList) DeepCopy() *OpenIDConnectPresetList { + if in == nil { + return nil + } + out := new(OpenIDConnectPresetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OpenIDConnectPresetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenIDConnectPresetSpec) DeepCopyInto(out *OpenIDConnectPresetSpec) { + *out = *in + in.Server.DeepCopyInto(&out.Server) + if in.Client != nil { + in, out := &in.Client, &out.Client + *out = new(OpenIDConnectClientAuthentication) + (*in).DeepCopyInto(*out) + } + if in.ShootSelector != nil { + in, out := &in.ShootSelector, &out.ShootSelector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenIDConnectPresetSpec. +func (in *OpenIDConnectPresetSpec) DeepCopy() *OpenIDConnectPresetSpec { + if in == nil { + return nil + } + out := new(OpenIDConnectPresetSpec) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/gardener/gardener/pkg/chartrenderer/default.go b/vendor/github.com/gardener/gardener/pkg/chartrenderer/default.go new file mode 100644 index 000000000..c549105e6 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/chartrenderer/default.go @@ -0,0 +1,197 @@ +// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package chartrenderer + +import ( + "bytes" + "encoding/json" + "fmt" + "path" + "strings" + + "k8s.io/apimachinery/pkg/version" + "k8s.io/client-go/discovery" + "k8s.io/client-go/rest" + + "k8s.io/helm/pkg/chartutil" + "k8s.io/helm/pkg/engine" + "k8s.io/helm/pkg/manifest" + chartapi "k8s.io/helm/pkg/proto/hapi/chart" + "k8s.io/helm/pkg/timeconv" +) + +const notesFileSuffix = "NOTES.txt" + +// chartRenderer is a struct which contains the chart render engine and a Kubernetes client. +// The chart render is used to render the Helm charts into a RenderedChart struct from which the +// resulting manifest can be generated. 
+type chartRenderer struct { + renderer *engine.Engine + capabilities *chartutil.Capabilities +} + +// NewForConfig creates a new ChartRenderer object. It requires a Kubernetes client as input which will be +// injected in the Tiller environment. +func NewForConfig(cfg *rest.Config) (Interface, error) { + disc, err := discovery.NewDiscoveryClientForConfig(cfg) + if err != nil { + return nil, err + } + + sv, err := disc.ServerVersion() + if err != nil { + return nil, fmt.Errorf("failed to get kubernetes server version %v", err) + } + + return NewWithServerVersion(sv), nil +} + +// NewWithServerVersion creates a new chart renderer with the given server version. +func NewWithServerVersion(serverVersion *version.Info) Interface { + return &chartRenderer{ + renderer: engine.New(), + capabilities: &chartutil.Capabilities{KubeVersion: serverVersion}, + } +} + +// DiscoverCapabilities discovers the capabilities required for chart renderers using the given +// DiscoveryInterface. +func DiscoverCapabilities(disc discovery.DiscoveryInterface) (*chartutil.Capabilities, error) { + sv, err := disc.ServerVersion() + if err != nil { + return nil, fmt.Errorf("failed to get kubernetes server version %v", err) + } + + return &chartutil.Capabilities{KubeVersion: sv}, nil +} + +// Render loads the chart from the given location and calls the Render() function +// to convert it into a ChartRelease object. +func (r *chartRenderer) Render(chartPath, releaseName, namespace string, values interface{}) (*RenderedChart, error) { + chart, err := chartutil.Load(chartPath) + if err != nil { + return nil, fmt.Errorf("can't create load chart from path %s:, %s", chartPath, err) + } + return r.renderRelease(chart, releaseName, namespace, values) +} + +// RenderArchive loads the chart from the given location and calls the Render() function +// to convert it into a ChartRelease object. 
+func (r *chartRenderer) RenderArchive(archive []byte, releaseName, namespace string, values interface{}) (*RenderedChart, error) { + chart, err := chartutil.LoadArchive(bytes.NewReader(archive)) + if err != nil { + return nil, fmt.Errorf("can't create load chart from archive: %s", err) + } + return r.renderRelease(chart, releaseName, namespace, values) +} + +func (r *chartRenderer) renderRelease(chart *chartapi.Chart, releaseName, namespace string, values interface{}) (*RenderedChart, error) { + chartName := chart.GetMetadata().GetName() + + parsedValues, err := json.Marshal(values) + if err != nil { + return nil, fmt.Errorf("can't parse variables for chart %s: ,%s", chartName, err) + } + chartConfig := &chartapi.Config{Raw: string(parsedValues)} + + err = chartutil.ProcessRequirementsEnabled(chart, chartConfig) + if err != nil { + return nil, fmt.Errorf("can't process requirements for chart %s: ,%s", chartName, err) + } + err = chartutil.ProcessRequirementsImportValues(chart) + if err != nil { + return nil, fmt.Errorf("can't process requirements for import values for chart %s: ,%s", chartName, err) + } + + caps := r.capabilities + revision := 1 + ts := timeconv.Now() + options := chartutil.ReleaseOptions{ + Name: releaseName, + Time: ts, + Namespace: namespace, + Revision: revision, + IsInstall: true, + } + + valuesToRender, err := chartutil.ToRenderValuesCaps(chart, chartConfig, options, caps) + if err != nil { + return nil, err + } + return r.renderResources(chart, valuesToRender) +} + +func (r *chartRenderer) renderResources(ch *chartapi.Chart, values chartutil.Values) (*RenderedChart, error) { + files, err := r.renderer.Render(ch, values) + if err != nil { + return nil, err + } + + // Remove NOTES.txt and partials + for k := range files { + if strings.HasSuffix(k, notesFileSuffix) || strings.HasPrefix(path.Base(k), "_") { + delete(files, k) + } + } + + manifests := manifest.SplitManifests(files) + manifests = SortByKind(manifests) + + return &RenderedChart{ + ChartName: ch.Metadata.Name, + Manifests: manifests, + }, nil +} + +// Manifest returns the manifest of the rendered chart as byte array. +func (c *RenderedChart) Manifest() []byte { + // Aggregate all valid manifests into one big doc. + b := bytes.NewBuffer(nil) + + for _, mf := range c.Manifests { + b.WriteString("\n---\n# Source: " + mf.Name + "\n") + b.WriteString(mf.Content) + } + return b.Bytes() +} + +// Files returns all rendered manifests mapping their names to their content. +func (c *RenderedChart) Files() map[string]string { + var files = make(map[string]string) + for _, manifest := range c.Manifests { + files[manifest.Name] = manifest.Content + } + return files +} + +// FileContent returns explicitly the content of the provided . 
+func (c *RenderedChart) FileContent(filename string) string { + for _, mf := range c.Manifests { + if mf.Name == fmt.Sprintf("%s/templates/%s", c.ChartName, filename) { + return mf.Content + } + } + return "" +} + +// AsSecretData returns all rendered manifests that is capable for used as data of a secret +func (c *RenderedChart) AsSecretData() map[string][]byte { + data := make(map[string][]byte, len(c.Files())) + for fileName, fileContent := range c.Files() { + key := strings.ReplaceAll(fileName, "/", "_") + data[key] = []byte(fileContent) + } + return data +} diff --git a/vendor/github.com/gardener/gardener/pkg/chartrenderer/factory.go b/vendor/github.com/gardener/gardener/pkg/chartrenderer/factory.go new file mode 100644 index 000000000..e93b44f92 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/chartrenderer/factory.go @@ -0,0 +1,37 @@ +// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package chartrenderer + +import ( + "k8s.io/client-go/rest" +) + +// Factory is a factory that is able to produce Interface. +type Factory interface { + NewForConfig(config *rest.Config) (Interface, error) +} + +// FactoryFunc implements the Factory interface. +type FactoryFunc func(config *rest.Config) (Interface, error) + +// NewForConfig implements Factory. +func (f FactoryFunc) NewForConfig(config *rest.Config) (Interface, error) { + return f(config) +} + +// DefaultFactory returns the default Factory. +func DefaultFactory() Factory { + return FactoryFunc(NewForConfig) +} diff --git a/vendor/github.com/gardener/gardener/pkg/chartrenderer/renderer.go b/vendor/github.com/gardener/gardener/pkg/chartrenderer/renderer.go new file mode 100644 index 000000000..dac90c2e0 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/chartrenderer/renderer.go @@ -0,0 +1,32 @@ +// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package chartrenderer + +import ( + "k8s.io/helm/pkg/manifest" +) + +// Interface is an interface for rendering Helm Charts from path, name, namespace and values. 
+type Interface interface { + Render(chartPath, releaseName, namespace string, values interface{}) (*RenderedChart, error) + RenderArchive(archive []byte, releaseName, namespace string, values interface{}) (*RenderedChart, error) +} + +// RenderedChart holds a map of rendered templates file with template file name as key and +// rendered template as value. +type RenderedChart struct { + ChartName string + Manifests []manifest.Manifest +} diff --git a/vendor/github.com/gardener/gardener/pkg/chartrenderer/sorter.go b/vendor/github.com/gardener/gardener/pkg/chartrenderer/sorter.go new file mode 100644 index 000000000..cded1848d --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/chartrenderer/sorter.go @@ -0,0 +1,121 @@ +/* + Copyright The Helm Authors. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// @rfranzke, @zanetworker: +// Origin of this file: https://github.com/helm/helm/blob/bed4054c412f95d140c8c98b6387f40df7f3139e/pkg/tiller/kind_sorter.go +// We have copied this to our repository to not depend on k8s.io/helm/pkg/tiller. +// This package is depending on k8s.io/helm/pkg/kube which transitively depends on k8s.io/kubernetes. +// To prevent vendor hell we don't want to depend on k8s.io/kubernetes. + +package chartrenderer + +import ( + "sort" + + "k8s.io/helm/pkg/manifest" +) + +// SortOrder is an ordering of Kinds. +type SortOrder []string + +// InstallOrder is the order in which manifests should be installed (by Kind). +// +// Those occurring earlier in the list get installed before those occurring later in the list. 
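The chartrenderer Interface and RenderedChart types above are the surface consumed by callers. The following is an illustrative usage sketch only and is not part of the vendored files: it assumes a reachable cluster, a kubeconfig at a placeholder path, and a chart directory named charts/etcd, and it exercises only the NewForConfig, Render and Manifest functions shown in default.go.

```go
package main

import (
	"fmt"
	"log"

	"github.com/gardener/gardener/pkg/chartrenderer"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a rest.Config from a kubeconfig file (the path is a placeholder for this sketch).
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		log.Fatal(err)
	}

	// NewForConfig discovers the server version and returns a chartrenderer.Interface.
	renderer, err := chartrenderer.NewForConfig(cfg)
	if err != nil {
		log.Fatal(err)
	}

	// Render the chart at the given path into a RenderedChart; "charts/etcd" and the
	// values map are assumptions for this sketch, not taken from this patch.
	release, err := renderer.Render("charts/etcd", "etcd", "default", map[string]interface{}{
		"replicas": 1,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Manifest aggregates all rendered manifests into a single YAML document stream.
	fmt.Printf("%s\n", release.Manifest())
}
```

Render marshals the supplied values to JSON internally, so any JSON-serializable map or struct works as the values argument.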
+var InstallOrder SortOrder = []string{ + "Namespace", + "ResourceQuota", + "LimitRange", + "PodSecurityPolicy", + "PodDisruptionBudget", + "Secret", + "ConfigMap", + "StorageClass", + "PersistentVolume", + "PersistentVolumeClaim", + "ServiceAccount", + "CustomResourceDefinition", + "ClusterRole", + "ClusterRoleBinding", + "Role", + "RoleBinding", + "Service", + "DaemonSet", + "Pod", + "ReplicationController", + "ReplicaSet", + "Deployment", + "StatefulSet", + "Job", + "CronJob", + "Ingress", + "APIService", +} + +type kindSorter struct { + ordering map[string]int + manifests []manifest.Manifest +} + +func newKindSorter(m []manifest.Manifest, s SortOrder) *kindSorter { + o := make(map[string]int, len(s)) + for v, k := range s { + o[k] = v + } + + return &kindSorter{ + manifests: m, + ordering: o, + } +} + +func (k *kindSorter) Len() int { return len(k.manifests) } + +func (k *kindSorter) Swap(i, j int) { k.manifests[i], k.manifests[j] = k.manifests[j], k.manifests[i] } + +func (k *kindSorter) Less(i, j int) bool { + a := k.manifests[i] + b := k.manifests[j] + first, aok := k.ordering[a.Head.Kind] + second, bok := k.ordering[b.Head.Kind] + + if !aok && !bok { + // if both are unknown then sort alphabetically by kind and name + if a.Head.Kind != b.Head.Kind { + return a.Head.Kind < b.Head.Kind + } + return a.Name < b.Name + } + + // unknown kind is last + if !aok { + return false + } + if !bok { + return true + } + + // if same kind sub sort alphanumeric + if first == second { + return a.Name < b.Name + } + // sort different kinds + return first < second +} + +// SortByKind sorts manifests in InstallOrder +func SortByKind(manifests []manifest.Manifest) []manifest.Manifest { + ordering := InstallOrder + ks := newKindSorter(manifests, ordering) + sort.Sort(ks) + return ks.manifests +} diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/clientset.go b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/clientset.go new file mode 100644 index 000000000..d6ec68662 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/clientset.go @@ -0,0 +1,111 @@ +/* +Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package versioned + +import ( + "fmt" + + corev1alpha1 "github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1" + corev1beta1 "github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1" + discovery "k8s.io/client-go/discovery" + rest "k8s.io/client-go/rest" + flowcontrol "k8s.io/client-go/util/flowcontrol" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + CoreV1alpha1() corev1alpha1.CoreV1alpha1Interface + CoreV1beta1() corev1beta1.CoreV1beta1Interface +} + +// Clientset contains the clients for groups. 
Each group has exactly one +// version included in a Clientset. +type Clientset struct { + *discovery.DiscoveryClient + coreV1alpha1 *corev1alpha1.CoreV1alpha1Client + coreV1beta1 *corev1beta1.CoreV1beta1Client +} + +// CoreV1alpha1 retrieves the CoreV1alpha1Client +func (c *Clientset) CoreV1alpha1() corev1alpha1.CoreV1alpha1Interface { + return c.coreV1alpha1 +} + +// CoreV1beta1 retrieves the CoreV1beta1Client +func (c *Clientset) CoreV1beta1() corev1beta1.CoreV1beta1Interface { + return c.coreV1beta1 +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + if c == nil { + return nil + } + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfig will generate a rate-limiter in configShallowCopy. +func NewForConfig(c *rest.Config) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + if configShallowCopy.Burst <= 0 { + return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") + } + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + var cs Clientset + var err error + cs.coreV1alpha1, err = corev1alpha1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.coreV1beta1, err = corev1beta1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + + cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + return &cs, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *Clientset { + var cs Clientset + cs.coreV1alpha1 = corev1alpha1.NewForConfigOrDie(c) + cs.coreV1beta1 = corev1beta1.NewForConfigOrDie(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) + return &cs +} + +// New creates a new Clientset for the given RESTClient. +func New(c rest.Interface) *Clientset { + var cs Clientset + cs.coreV1alpha1 = corev1alpha1.New(c) + cs.coreV1beta1 = corev1beta1.New(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &cs +} diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/doc.go b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/doc.go new file mode 100644 index 000000000..32d852285 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/doc.go @@ -0,0 +1,20 @@ +/* +Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
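For orientation, here is a minimal sketch of how the generated clientset above is typically constructed; it is illustrative only, not part of the vendored files, and the kubeconfig path is a placeholder. It uses only NewForConfig and the embedded discovery client shown in clientset.go.

```go
package main

import (
	"fmt"
	"log"

	gardencoreclientset "github.com/gardener/gardener/pkg/client/core/clientset/versioned"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Load a rest.Config from a kubeconfig file (the path is a placeholder).
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/garden-kubeconfig")
	if err != nil {
		log.Fatal(err)
	}

	// NewForConfig wires up the core.gardener.cloud v1alpha1 and v1beta1 clients
	// plus the discovery client, as shown in clientset.go above.
	cs, err := gardencoreclientset.NewForConfig(cfg)
	if err != nil {
		log.Fatal(err)
	}

	// The embedded discovery client can be used directly, e.g. to read the server version.
	serverVersion, err := cs.Discovery().ServerVersion()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("connected to", serverVersion.GitVersion)
}
```

NewForConfigOrDie behaves the same but panics on configuration errors, which is convenient in tests and init paths.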
+ +// This package has the automatically generated clientset. +package versioned diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/scheme/doc.go b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/scheme/doc.go new file mode 100644 index 000000000..7d4fb776b --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/scheme/doc.go @@ -0,0 +1,20 @@ +/* +Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package contains the scheme of the automatically generated clientset. +package scheme diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/scheme/register.go b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/scheme/register.go new file mode 100644 index 000000000..aee592da4 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/scheme/register.go @@ -0,0 +1,58 @@ +/* +Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package scheme + +import ( + corev1alpha1 "github.com/gardener/gardener/pkg/apis/core/v1alpha1" + corev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var Scheme = runtime.NewScheme() +var Codecs = serializer.NewCodecFactory(Scheme) +var ParameterCodec = runtime.NewParameterCodec(Scheme) +var localSchemeBuilder = runtime.SchemeBuilder{ + corev1alpha1.AddToScheme, + corev1beta1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. 
This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(Scheme)) +} diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/backupbucket.go b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/backupbucket.go new file mode 100644 index 000000000..8e168d589 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/backupbucket.go @@ -0,0 +1,184 @@ +/* +Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + "time" + + v1alpha1 "github.com/gardener/gardener/pkg/apis/core/v1alpha1" + scheme "github.com/gardener/gardener/pkg/client/core/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// BackupBucketsGetter has a method to return a BackupBucketInterface. +// A group's client should implement this interface. +type BackupBucketsGetter interface { + BackupBuckets() BackupBucketInterface +} + +// BackupBucketInterface has methods to work with BackupBucket resources. 
+type BackupBucketInterface interface { + Create(ctx context.Context, backupBucket *v1alpha1.BackupBucket, opts v1.CreateOptions) (*v1alpha1.BackupBucket, error) + Update(ctx context.Context, backupBucket *v1alpha1.BackupBucket, opts v1.UpdateOptions) (*v1alpha1.BackupBucket, error) + UpdateStatus(ctx context.Context, backupBucket *v1alpha1.BackupBucket, opts v1.UpdateOptions) (*v1alpha1.BackupBucket, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.BackupBucket, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.BackupBucketList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.BackupBucket, err error) + BackupBucketExpansion +} + +// backupBuckets implements BackupBucketInterface +type backupBuckets struct { + client rest.Interface +} + +// newBackupBuckets returns a BackupBuckets +func newBackupBuckets(c *CoreV1alpha1Client) *backupBuckets { + return &backupBuckets{ + client: c.RESTClient(), + } +} + +// Get takes name of the backupBucket, and returns the corresponding backupBucket object, and an error if there is any. +func (c *backupBuckets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.BackupBucket, err error) { + result = &v1alpha1.BackupBucket{} + err = c.client.Get(). + Resource("backupbuckets"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of BackupBuckets that match those selectors. +func (c *backupBuckets) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.BackupBucketList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.BackupBucketList{} + err = c.client.Get(). + Resource("backupbuckets"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested backupBuckets. +func (c *backupBuckets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("backupbuckets"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a backupBucket and creates it. Returns the server's representation of the backupBucket, and an error, if there is any. +func (c *backupBuckets) Create(ctx context.Context, backupBucket *v1alpha1.BackupBucket, opts v1.CreateOptions) (result *v1alpha1.BackupBucket, err error) { + result = &v1alpha1.BackupBucket{} + err = c.client.Post(). + Resource("backupbuckets"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(backupBucket). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a backupBucket and updates it. Returns the server's representation of the backupBucket, and an error, if there is any. 
+func (c *backupBuckets) Update(ctx context.Context, backupBucket *v1alpha1.BackupBucket, opts v1.UpdateOptions) (result *v1alpha1.BackupBucket, err error) { + result = &v1alpha1.BackupBucket{} + err = c.client.Put(). + Resource("backupbuckets"). + Name(backupBucket.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(backupBucket). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *backupBuckets) UpdateStatus(ctx context.Context, backupBucket *v1alpha1.BackupBucket, opts v1.UpdateOptions) (result *v1alpha1.BackupBucket, err error) { + result = &v1alpha1.BackupBucket{} + err = c.client.Put(). + Resource("backupbuckets"). + Name(backupBucket.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(backupBucket). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the backupBucket and deletes it. Returns an error if one occurs. +func (c *backupBuckets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Resource("backupbuckets"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *backupBuckets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("backupbuckets"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched backupBucket. +func (c *backupBuckets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.BackupBucket, err error) { + result = &v1alpha1.BackupBucket{} + err = c.client.Patch(pt). + Resource("backupbuckets"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/backupentry.go b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/backupentry.go new file mode 100644 index 000000000..cf367142c --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/backupentry.go @@ -0,0 +1,195 @@ +/* +Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
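To show how the cluster-scoped typed client above is consumed, here is a short illustrative sketch (not part of the vendored files) that lists BackupBuckets; the kubeconfig path is again a placeholder, and the clientset is built exactly as in the previous sketch.

```go
package main

import (
	"context"
	"fmt"
	"log"

	gardencoreclientset "github.com/gardener/gardener/pkg/client/core/clientset/versioned"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build the clientset as in the previous sketch; the kubeconfig path is a placeholder.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/garden-kubeconfig")
	if err != nil {
		log.Fatal(err)
	}
	cs, err := gardencoreclientset.NewForConfig(cfg)
	if err != nil {
		log.Fatal(err)
	}

	// BackupBuckets is cluster-scoped, so no namespace is passed; List follows the
	// signature generated in backupbucket.go above.
	buckets, err := cs.CoreV1alpha1().BackupBuckets().List(context.Background(), metav1.ListOptions{})
	if err != nil {
		log.Fatal(err)
	}
	for _, b := range buckets.Items {
		fmt.Println(b.Name)
	}
}
```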
+ +package v1alpha1 + +import ( + "context" + "time" + + v1alpha1 "github.com/gardener/gardener/pkg/apis/core/v1alpha1" + scheme "github.com/gardener/gardener/pkg/client/core/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// BackupEntriesGetter has a method to return a BackupEntryInterface. +// A group's client should implement this interface. +type BackupEntriesGetter interface { + BackupEntries(namespace string) BackupEntryInterface +} + +// BackupEntryInterface has methods to work with BackupEntry resources. +type BackupEntryInterface interface { + Create(ctx context.Context, backupEntry *v1alpha1.BackupEntry, opts v1.CreateOptions) (*v1alpha1.BackupEntry, error) + Update(ctx context.Context, backupEntry *v1alpha1.BackupEntry, opts v1.UpdateOptions) (*v1alpha1.BackupEntry, error) + UpdateStatus(ctx context.Context, backupEntry *v1alpha1.BackupEntry, opts v1.UpdateOptions) (*v1alpha1.BackupEntry, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.BackupEntry, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.BackupEntryList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.BackupEntry, err error) + BackupEntryExpansion +} + +// backupEntries implements BackupEntryInterface +type backupEntries struct { + client rest.Interface + ns string +} + +// newBackupEntries returns a BackupEntries +func newBackupEntries(c *CoreV1alpha1Client, namespace string) *backupEntries { + return &backupEntries{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the backupEntry, and returns the corresponding backupEntry object, and an error if there is any. +func (c *backupEntries) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.BackupEntry, err error) { + result = &v1alpha1.BackupEntry{} + err = c.client.Get(). + Namespace(c.ns). + Resource("backupentries"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of BackupEntries that match those selectors. +func (c *backupEntries) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.BackupEntryList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.BackupEntryList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("backupentries"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested backupEntries. +func (c *backupEntries) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("backupentries"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). 
+ Watch(ctx) +} + +// Create takes the representation of a backupEntry and creates it. Returns the server's representation of the backupEntry, and an error, if there is any. +func (c *backupEntries) Create(ctx context.Context, backupEntry *v1alpha1.BackupEntry, opts v1.CreateOptions) (result *v1alpha1.BackupEntry, err error) { + result = &v1alpha1.BackupEntry{} + err = c.client.Post(). + Namespace(c.ns). + Resource("backupentries"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(backupEntry). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a backupEntry and updates it. Returns the server's representation of the backupEntry, and an error, if there is any. +func (c *backupEntries) Update(ctx context.Context, backupEntry *v1alpha1.BackupEntry, opts v1.UpdateOptions) (result *v1alpha1.BackupEntry, err error) { + result = &v1alpha1.BackupEntry{} + err = c.client.Put(). + Namespace(c.ns). + Resource("backupentries"). + Name(backupEntry.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(backupEntry). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *backupEntries) UpdateStatus(ctx context.Context, backupEntry *v1alpha1.BackupEntry, opts v1.UpdateOptions) (result *v1alpha1.BackupEntry, err error) { + result = &v1alpha1.BackupEntry{} + err = c.client.Put(). + Namespace(c.ns). + Resource("backupentries"). + Name(backupEntry.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(backupEntry). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the backupEntry and deletes it. Returns an error if one occurs. +func (c *backupEntries) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("backupentries"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *backupEntries) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("backupentries"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched backupEntry. +func (c *backupEntries) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.BackupEntry, err error) { + result = &v1alpha1.BackupEntry{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("backupentries"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/cloudprofile.go b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/cloudprofile.go new file mode 100644 index 000000000..064d00471 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/cloudprofile.go @@ -0,0 +1,168 @@ +/* +Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. 
This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + "time" + + v1alpha1 "github.com/gardener/gardener/pkg/apis/core/v1alpha1" + scheme "github.com/gardener/gardener/pkg/client/core/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// CloudProfilesGetter has a method to return a CloudProfileInterface. +// A group's client should implement this interface. +type CloudProfilesGetter interface { + CloudProfiles() CloudProfileInterface +} + +// CloudProfileInterface has methods to work with CloudProfile resources. +type CloudProfileInterface interface { + Create(ctx context.Context, cloudProfile *v1alpha1.CloudProfile, opts v1.CreateOptions) (*v1alpha1.CloudProfile, error) + Update(ctx context.Context, cloudProfile *v1alpha1.CloudProfile, opts v1.UpdateOptions) (*v1alpha1.CloudProfile, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.CloudProfile, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.CloudProfileList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.CloudProfile, err error) + CloudProfileExpansion +} + +// cloudProfiles implements CloudProfileInterface +type cloudProfiles struct { + client rest.Interface +} + +// newCloudProfiles returns a CloudProfiles +func newCloudProfiles(c *CoreV1alpha1Client) *cloudProfiles { + return &cloudProfiles{ + client: c.RESTClient(), + } +} + +// Get takes name of the cloudProfile, and returns the corresponding cloudProfile object, and an error if there is any. +func (c *cloudProfiles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.CloudProfile, err error) { + result = &v1alpha1.CloudProfile{} + err = c.client.Get(). + Resource("cloudprofiles"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of CloudProfiles that match those selectors. +func (c *cloudProfiles) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.CloudProfileList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.CloudProfileList{} + err = c.client.Get(). + Resource("cloudprofiles"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). 
+ Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested cloudProfiles. +func (c *cloudProfiles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("cloudprofiles"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a cloudProfile and creates it. Returns the server's representation of the cloudProfile, and an error, if there is any. +func (c *cloudProfiles) Create(ctx context.Context, cloudProfile *v1alpha1.CloudProfile, opts v1.CreateOptions) (result *v1alpha1.CloudProfile, err error) { + result = &v1alpha1.CloudProfile{} + err = c.client.Post(). + Resource("cloudprofiles"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(cloudProfile). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a cloudProfile and updates it. Returns the server's representation of the cloudProfile, and an error, if there is any. +func (c *cloudProfiles) Update(ctx context.Context, cloudProfile *v1alpha1.CloudProfile, opts v1.UpdateOptions) (result *v1alpha1.CloudProfile, err error) { + result = &v1alpha1.CloudProfile{} + err = c.client.Put(). + Resource("cloudprofiles"). + Name(cloudProfile.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(cloudProfile). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the cloudProfile and deletes it. Returns an error if one occurs. +func (c *cloudProfiles) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Resource("cloudprofiles"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *cloudProfiles) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("cloudprofiles"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched cloudProfile. +func (c *cloudProfiles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.CloudProfile, err error) { + result = &v1alpha1.CloudProfile{} + err = c.client.Patch(pt). + Resource("cloudprofiles"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/controllerinstallation.go b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/controllerinstallation.go new file mode 100644 index 000000000..219170efe --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/controllerinstallation.go @@ -0,0 +1,184 @@ +/* +Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 
2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + "time" + + v1alpha1 "github.com/gardener/gardener/pkg/apis/core/v1alpha1" + scheme "github.com/gardener/gardener/pkg/client/core/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// ControllerInstallationsGetter has a method to return a ControllerInstallationInterface. +// A group's client should implement this interface. +type ControllerInstallationsGetter interface { + ControllerInstallations() ControllerInstallationInterface +} + +// ControllerInstallationInterface has methods to work with ControllerInstallation resources. +type ControllerInstallationInterface interface { + Create(ctx context.Context, controllerInstallation *v1alpha1.ControllerInstallation, opts v1.CreateOptions) (*v1alpha1.ControllerInstallation, error) + Update(ctx context.Context, controllerInstallation *v1alpha1.ControllerInstallation, opts v1.UpdateOptions) (*v1alpha1.ControllerInstallation, error) + UpdateStatus(ctx context.Context, controllerInstallation *v1alpha1.ControllerInstallation, opts v1.UpdateOptions) (*v1alpha1.ControllerInstallation, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ControllerInstallation, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ControllerInstallationList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ControllerInstallation, err error) + ControllerInstallationExpansion +} + +// controllerInstallations implements ControllerInstallationInterface +type controllerInstallations struct { + client rest.Interface +} + +// newControllerInstallations returns a ControllerInstallations +func newControllerInstallations(c *CoreV1alpha1Client) *controllerInstallations { + return &controllerInstallations{ + client: c.RESTClient(), + } +} + +// Get takes name of the controllerInstallation, and returns the corresponding controllerInstallation object, and an error if there is any. +func (c *controllerInstallations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ControllerInstallation, err error) { + result = &v1alpha1.ControllerInstallation{} + err = c.client.Get(). + Resource("controllerinstallations"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ControllerInstallations that match those selectors. 
+func (c *controllerInstallations) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ControllerInstallationList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.ControllerInstallationList{} + err = c.client.Get(). + Resource("controllerinstallations"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested controllerInstallations. +func (c *controllerInstallations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("controllerinstallations"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a controllerInstallation and creates it. Returns the server's representation of the controllerInstallation, and an error, if there is any. +func (c *controllerInstallations) Create(ctx context.Context, controllerInstallation *v1alpha1.ControllerInstallation, opts v1.CreateOptions) (result *v1alpha1.ControllerInstallation, err error) { + result = &v1alpha1.ControllerInstallation{} + err = c.client.Post(). + Resource("controllerinstallations"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(controllerInstallation). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a controllerInstallation and updates it. Returns the server's representation of the controllerInstallation, and an error, if there is any. +func (c *controllerInstallations) Update(ctx context.Context, controllerInstallation *v1alpha1.ControllerInstallation, opts v1.UpdateOptions) (result *v1alpha1.ControllerInstallation, err error) { + result = &v1alpha1.ControllerInstallation{} + err = c.client.Put(). + Resource("controllerinstallations"). + Name(controllerInstallation.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(controllerInstallation). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *controllerInstallations) UpdateStatus(ctx context.Context, controllerInstallation *v1alpha1.ControllerInstallation, opts v1.UpdateOptions) (result *v1alpha1.ControllerInstallation, err error) { + result = &v1alpha1.ControllerInstallation{} + err = c.client.Put(). + Resource("controllerinstallations"). + Name(controllerInstallation.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(controllerInstallation). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the controllerInstallation and deletes it. Returns an error if one occurs. +func (c *controllerInstallations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Resource("controllerinstallations"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. 
+func (c *controllerInstallations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("controllerinstallations"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched controllerInstallation. +func (c *controllerInstallations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ControllerInstallation, err error) { + result = &v1alpha1.ControllerInstallation{} + err = c.client.Patch(pt). + Resource("controllerinstallations"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/controllerregistration.go b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/controllerregistration.go new file mode 100644 index 000000000..67890f524 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/controllerregistration.go @@ -0,0 +1,168 @@ +/* +Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + "time" + + v1alpha1 "github.com/gardener/gardener/pkg/apis/core/v1alpha1" + scheme "github.com/gardener/gardener/pkg/client/core/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// ControllerRegistrationsGetter has a method to return a ControllerRegistrationInterface. +// A group's client should implement this interface. +type ControllerRegistrationsGetter interface { + ControllerRegistrations() ControllerRegistrationInterface +} + +// ControllerRegistrationInterface has methods to work with ControllerRegistration resources. 
+type ControllerRegistrationInterface interface { + Create(ctx context.Context, controllerRegistration *v1alpha1.ControllerRegistration, opts v1.CreateOptions) (*v1alpha1.ControllerRegistration, error) + Update(ctx context.Context, controllerRegistration *v1alpha1.ControllerRegistration, opts v1.UpdateOptions) (*v1alpha1.ControllerRegistration, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ControllerRegistration, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ControllerRegistrationList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ControllerRegistration, err error) + ControllerRegistrationExpansion +} + +// controllerRegistrations implements ControllerRegistrationInterface +type controllerRegistrations struct { + client rest.Interface +} + +// newControllerRegistrations returns a ControllerRegistrations +func newControllerRegistrations(c *CoreV1alpha1Client) *controllerRegistrations { + return &controllerRegistrations{ + client: c.RESTClient(), + } +} + +// Get takes name of the controllerRegistration, and returns the corresponding controllerRegistration object, and an error if there is any. +func (c *controllerRegistrations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ControllerRegistration, err error) { + result = &v1alpha1.ControllerRegistration{} + err = c.client.Get(). + Resource("controllerregistrations"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ControllerRegistrations that match those selectors. +func (c *controllerRegistrations) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ControllerRegistrationList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.ControllerRegistrationList{} + err = c.client.Get(). + Resource("controllerregistrations"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested controllerRegistrations. +func (c *controllerRegistrations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("controllerregistrations"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a controllerRegistration and creates it. Returns the server's representation of the controllerRegistration, and an error, if there is any. +func (c *controllerRegistrations) Create(ctx context.Context, controllerRegistration *v1alpha1.ControllerRegistration, opts v1.CreateOptions) (result *v1alpha1.ControllerRegistration, err error) { + result = &v1alpha1.ControllerRegistration{} + err = c.client.Post(). + Resource("controllerregistrations"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(controllerRegistration). + Do(ctx). 
+ Into(result) + return +} + +// Update takes the representation of a controllerRegistration and updates it. Returns the server's representation of the controllerRegistration, and an error, if there is any. +func (c *controllerRegistrations) Update(ctx context.Context, controllerRegistration *v1alpha1.ControllerRegistration, opts v1.UpdateOptions) (result *v1alpha1.ControllerRegistration, err error) { + result = &v1alpha1.ControllerRegistration{} + err = c.client.Put(). + Resource("controllerregistrations"). + Name(controllerRegistration.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(controllerRegistration). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the controllerRegistration and deletes it. Returns an error if one occurs. +func (c *controllerRegistrations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Resource("controllerregistrations"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *controllerRegistrations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("controllerregistrations"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched controllerRegistration. +func (c *controllerRegistrations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ControllerRegistration, err error) { + result = &v1alpha1.ControllerRegistration{} + err = c.client.Patch(pt). + Resource("controllerregistrations"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/core_client.go b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/core_client.go new file mode 100644 index 000000000..3d607254c --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/core_client.go @@ -0,0 +1,144 @@ +/* +Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + v1alpha1 "github.com/gardener/gardener/pkg/apis/core/v1alpha1" + "github.com/gardener/gardener/pkg/client/core/clientset/versioned/scheme" + rest "k8s.io/client-go/rest" +) + +type CoreV1alpha1Interface interface { + RESTClient() rest.Interface + BackupBucketsGetter + BackupEntriesGetter + CloudProfilesGetter + ControllerInstallationsGetter + ControllerRegistrationsGetter + PlantsGetter + ProjectsGetter + QuotasGetter + SecretBindingsGetter + SeedsGetter + ShootsGetter + ShootStatesGetter +} + +// CoreV1alpha1Client is used to interact with features provided by the core.gardener.cloud group. +type CoreV1alpha1Client struct { + restClient rest.Interface +} + +func (c *CoreV1alpha1Client) BackupBuckets() BackupBucketInterface { + return newBackupBuckets(c) +} + +func (c *CoreV1alpha1Client) BackupEntries(namespace string) BackupEntryInterface { + return newBackupEntries(c, namespace) +} + +func (c *CoreV1alpha1Client) CloudProfiles() CloudProfileInterface { + return newCloudProfiles(c) +} + +func (c *CoreV1alpha1Client) ControllerInstallations() ControllerInstallationInterface { + return newControllerInstallations(c) +} + +func (c *CoreV1alpha1Client) ControllerRegistrations() ControllerRegistrationInterface { + return newControllerRegistrations(c) +} + +func (c *CoreV1alpha1Client) Plants(namespace string) PlantInterface { + return newPlants(c, namespace) +} + +func (c *CoreV1alpha1Client) Projects() ProjectInterface { + return newProjects(c) +} + +func (c *CoreV1alpha1Client) Quotas(namespace string) QuotaInterface { + return newQuotas(c, namespace) +} + +func (c *CoreV1alpha1Client) SecretBindings(namespace string) SecretBindingInterface { + return newSecretBindings(c, namespace) +} + +func (c *CoreV1alpha1Client) Seeds() SeedInterface { + return newSeeds(c) +} + +func (c *CoreV1alpha1Client) Shoots(namespace string) ShootInterface { + return newShoots(c, namespace) +} + +func (c *CoreV1alpha1Client) ShootStates(namespace string) ShootStateInterface { + return newShootStates(c, namespace) +} + +// NewForConfig creates a new CoreV1alpha1Client for the given config. +func NewForConfig(c *rest.Config) (*CoreV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &CoreV1alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new CoreV1alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *CoreV1alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new CoreV1alpha1Client for the given RESTClient. +func New(c rest.Interface) *CoreV1alpha1Client { + return &CoreV1alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *CoreV1alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/doc.go b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/doc.go new file mode 100644 index 000000000..828c8ebe1 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1alpha1 diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/generated_expansion.go b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/generated_expansion.go new file mode 100644 index 000000000..89d962787 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/generated_expansion.go @@ -0,0 +1,43 @@ +/* +Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1alpha1 + +type BackupBucketExpansion interface{} + +type BackupEntryExpansion interface{} + +type CloudProfileExpansion interface{} + +type ControllerInstallationExpansion interface{} + +type ControllerRegistrationExpansion interface{} + +type PlantExpansion interface{} + +type ProjectExpansion interface{} + +type QuotaExpansion interface{} + +type SecretBindingExpansion interface{} + +type SeedExpansion interface{} + +type ShootExpansion interface{} + +type ShootStateExpansion interface{} diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/plant.go b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/plant.go new file mode 100644 index 000000000..2c5fb0c7f --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/plant.go @@ -0,0 +1,195 @@ +/* +Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + "time" + + v1alpha1 "github.com/gardener/gardener/pkg/apis/core/v1alpha1" + scheme "github.com/gardener/gardener/pkg/client/core/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// PlantsGetter has a method to return a PlantInterface. +// A group's client should implement this interface. +type PlantsGetter interface { + Plants(namespace string) PlantInterface +} + +// PlantInterface has methods to work with Plant resources. 
+type PlantInterface interface { + Create(ctx context.Context, plant *v1alpha1.Plant, opts v1.CreateOptions) (*v1alpha1.Plant, error) + Update(ctx context.Context, plant *v1alpha1.Plant, opts v1.UpdateOptions) (*v1alpha1.Plant, error) + UpdateStatus(ctx context.Context, plant *v1alpha1.Plant, opts v1.UpdateOptions) (*v1alpha1.Plant, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.Plant, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.PlantList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Plant, err error) + PlantExpansion +} + +// plants implements PlantInterface +type plants struct { + client rest.Interface + ns string +} + +// newPlants returns a Plants +func newPlants(c *CoreV1alpha1Client, namespace string) *plants { + return &plants{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the plant, and returns the corresponding plant object, and an error if there is any. +func (c *plants) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Plant, err error) { + result = &v1alpha1.Plant{} + err = c.client.Get(). + Namespace(c.ns). + Resource("plants"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Plants that match those selectors. +func (c *plants) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.PlantList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.PlantList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("plants"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested plants. +func (c *plants) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("plants"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a plant and creates it. Returns the server's representation of the plant, and an error, if there is any. +func (c *plants) Create(ctx context.Context, plant *v1alpha1.Plant, opts v1.CreateOptions) (result *v1alpha1.Plant, err error) { + result = &v1alpha1.Plant{} + err = c.client.Post(). + Namespace(c.ns). + Resource("plants"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(plant). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a plant and updates it. Returns the server's representation of the plant, and an error, if there is any. +func (c *plants) Update(ctx context.Context, plant *v1alpha1.Plant, opts v1.UpdateOptions) (result *v1alpha1.Plant, err error) { + result = &v1alpha1.Plant{} + err = c.client.Put(). + Namespace(c.ns). + Resource("plants"). + Name(plant.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(plant). 
+ Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *plants) UpdateStatus(ctx context.Context, plant *v1alpha1.Plant, opts v1.UpdateOptions) (result *v1alpha1.Plant, err error) { + result = &v1alpha1.Plant{} + err = c.client.Put(). + Namespace(c.ns). + Resource("plants"). + Name(plant.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(plant). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the plant and deletes it. Returns an error if one occurs. +func (c *plants) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("plants"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *plants) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("plants"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched plant. +func (c *plants) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Plant, err error) { + result = &v1alpha1.Plant{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("plants"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/project.go b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/project.go new file mode 100644 index 000000000..a42bf152e --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/project.go @@ -0,0 +1,184 @@ +/* +Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + "time" + + v1alpha1 "github.com/gardener/gardener/pkg/apis/core/v1alpha1" + scheme "github.com/gardener/gardener/pkg/client/core/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// ProjectsGetter has a method to return a ProjectInterface. +// A group's client should implement this interface. 
+type ProjectsGetter interface { + Projects() ProjectInterface +} + +// ProjectInterface has methods to work with Project resources. +type ProjectInterface interface { + Create(ctx context.Context, project *v1alpha1.Project, opts v1.CreateOptions) (*v1alpha1.Project, error) + Update(ctx context.Context, project *v1alpha1.Project, opts v1.UpdateOptions) (*v1alpha1.Project, error) + UpdateStatus(ctx context.Context, project *v1alpha1.Project, opts v1.UpdateOptions) (*v1alpha1.Project, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.Project, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ProjectList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Project, err error) + ProjectExpansion +} + +// projects implements ProjectInterface +type projects struct { + client rest.Interface +} + +// newProjects returns a Projects +func newProjects(c *CoreV1alpha1Client) *projects { + return &projects{ + client: c.RESTClient(), + } +} + +// Get takes name of the project, and returns the corresponding project object, and an error if there is any. +func (c *projects) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Project, err error) { + result = &v1alpha1.Project{} + err = c.client.Get(). + Resource("projects"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Projects that match those selectors. +func (c *projects) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ProjectList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.ProjectList{} + err = c.client.Get(). + Resource("projects"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested projects. +func (c *projects) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("projects"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a project and creates it. Returns the server's representation of the project, and an error, if there is any. +func (c *projects) Create(ctx context.Context, project *v1alpha1.Project, opts v1.CreateOptions) (result *v1alpha1.Project, err error) { + result = &v1alpha1.Project{} + err = c.client.Post(). + Resource("projects"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(project). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a project and updates it. Returns the server's representation of the project, and an error, if there is any. +func (c *projects) Update(ctx context.Context, project *v1alpha1.Project, opts v1.UpdateOptions) (result *v1alpha1.Project, err error) { + result = &v1alpha1.Project{} + err = c.client.Put(). 
+ Resource("projects"). + Name(project.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(project). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *projects) UpdateStatus(ctx context.Context, project *v1alpha1.Project, opts v1.UpdateOptions) (result *v1alpha1.Project, err error) { + result = &v1alpha1.Project{} + err = c.client.Put(). + Resource("projects"). + Name(project.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(project). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the project and deletes it. Returns an error if one occurs. +func (c *projects) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Resource("projects"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *projects) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("projects"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched project. +func (c *projects) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Project, err error) { + result = &v1alpha1.Project{} + err = c.client.Patch(pt). + Resource("projects"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/quota.go b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/quota.go new file mode 100644 index 000000000..1bd530ef1 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/quota.go @@ -0,0 +1,178 @@ +/* +Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + "time" + + v1alpha1 "github.com/gardener/gardener/pkg/apis/core/v1alpha1" + scheme "github.com/gardener/gardener/pkg/client/core/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// QuotasGetter has a method to return a QuotaInterface. 
+// A group's client should implement this interface. +type QuotasGetter interface { + Quotas(namespace string) QuotaInterface +} + +// QuotaInterface has methods to work with Quota resources. +type QuotaInterface interface { + Create(ctx context.Context, quota *v1alpha1.Quota, opts v1.CreateOptions) (*v1alpha1.Quota, error) + Update(ctx context.Context, quota *v1alpha1.Quota, opts v1.UpdateOptions) (*v1alpha1.Quota, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.Quota, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.QuotaList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Quota, err error) + QuotaExpansion +} + +// quotas implements QuotaInterface +type quotas struct { + client rest.Interface + ns string +} + +// newQuotas returns a Quotas +func newQuotas(c *CoreV1alpha1Client, namespace string) *quotas { + return &quotas{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the quota, and returns the corresponding quota object, and an error if there is any. +func (c *quotas) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Quota, err error) { + result = &v1alpha1.Quota{} + err = c.client.Get(). + Namespace(c.ns). + Resource("quotas"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Quotas that match those selectors. +func (c *quotas) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.QuotaList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.QuotaList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("quotas"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested quotas. +func (c *quotas) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("quotas"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a quota and creates it. Returns the server's representation of the quota, and an error, if there is any. +func (c *quotas) Create(ctx context.Context, quota *v1alpha1.Quota, opts v1.CreateOptions) (result *v1alpha1.Quota, err error) { + result = &v1alpha1.Quota{} + err = c.client.Post(). + Namespace(c.ns). + Resource("quotas"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(quota). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a quota and updates it. Returns the server's representation of the quota, and an error, if there is any. +func (c *quotas) Update(ctx context.Context, quota *v1alpha1.Quota, opts v1.UpdateOptions) (result *v1alpha1.Quota, err error) { + result = &v1alpha1.Quota{} + err = c.client.Put(). + Namespace(c.ns). + Resource("quotas"). 
+ Name(quota.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(quota). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the quota and deletes it. Returns an error if one occurs. +func (c *quotas) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("quotas"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *quotas) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("quotas"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched quota. +func (c *quotas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Quota, err error) { + result = &v1alpha1.Quota{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("quotas"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/secretbinding.go b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/secretbinding.go new file mode 100644 index 000000000..0798e053b --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/secretbinding.go @@ -0,0 +1,178 @@ +/* +Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + "time" + + v1alpha1 "github.com/gardener/gardener/pkg/apis/core/v1alpha1" + scheme "github.com/gardener/gardener/pkg/client/core/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// SecretBindingsGetter has a method to return a SecretBindingInterface. +// A group's client should implement this interface. +type SecretBindingsGetter interface { + SecretBindings(namespace string) SecretBindingInterface +} + +// SecretBindingInterface has methods to work with SecretBinding resources. 
+type SecretBindingInterface interface { + Create(ctx context.Context, secretBinding *v1alpha1.SecretBinding, opts v1.CreateOptions) (*v1alpha1.SecretBinding, error) + Update(ctx context.Context, secretBinding *v1alpha1.SecretBinding, opts v1.UpdateOptions) (*v1alpha1.SecretBinding, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.SecretBinding, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.SecretBindingList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.SecretBinding, err error) + SecretBindingExpansion +} + +// secretBindings implements SecretBindingInterface +type secretBindings struct { + client rest.Interface + ns string +} + +// newSecretBindings returns a SecretBindings +func newSecretBindings(c *CoreV1alpha1Client, namespace string) *secretBindings { + return &secretBindings{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the secretBinding, and returns the corresponding secretBinding object, and an error if there is any. +func (c *secretBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.SecretBinding, err error) { + result = &v1alpha1.SecretBinding{} + err = c.client.Get(). + Namespace(c.ns). + Resource("secretbindings"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of SecretBindings that match those selectors. +func (c *secretBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.SecretBindingList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.SecretBindingList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("secretbindings"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested secretBindings. +func (c *secretBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("secretbindings"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a secretBinding and creates it. Returns the server's representation of the secretBinding, and an error, if there is any. +func (c *secretBindings) Create(ctx context.Context, secretBinding *v1alpha1.SecretBinding, opts v1.CreateOptions) (result *v1alpha1.SecretBinding, err error) { + result = &v1alpha1.SecretBinding{} + err = c.client.Post(). + Namespace(c.ns). + Resource("secretbindings"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(secretBinding). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a secretBinding and updates it. Returns the server's representation of the secretBinding, and an error, if there is any. 
+func (c *secretBindings) Update(ctx context.Context, secretBinding *v1alpha1.SecretBinding, opts v1.UpdateOptions) (result *v1alpha1.SecretBinding, err error) { + result = &v1alpha1.SecretBinding{} + err = c.client.Put(). + Namespace(c.ns). + Resource("secretbindings"). + Name(secretBinding.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(secretBinding). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the secretBinding and deletes it. Returns an error if one occurs. +func (c *secretBindings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("secretbindings"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *secretBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("secretbindings"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched secretBinding. +func (c *secretBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.SecretBinding, err error) { + result = &v1alpha1.SecretBinding{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("secretbindings"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/seed.go b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/seed.go new file mode 100644 index 000000000..97dee9344 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/seed.go @@ -0,0 +1,184 @@ +/* +Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + "time" + + v1alpha1 "github.com/gardener/gardener/pkg/apis/core/v1alpha1" + scheme "github.com/gardener/gardener/pkg/client/core/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// SeedsGetter has a method to return a SeedInterface. +// A group's client should implement this interface. +type SeedsGetter interface { + Seeds() SeedInterface +} + +// SeedInterface has methods to work with Seed resources. 
+type SeedInterface interface { + Create(ctx context.Context, seed *v1alpha1.Seed, opts v1.CreateOptions) (*v1alpha1.Seed, error) + Update(ctx context.Context, seed *v1alpha1.Seed, opts v1.UpdateOptions) (*v1alpha1.Seed, error) + UpdateStatus(ctx context.Context, seed *v1alpha1.Seed, opts v1.UpdateOptions) (*v1alpha1.Seed, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.Seed, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.SeedList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Seed, err error) + SeedExpansion +} + +// seeds implements SeedInterface +type seeds struct { + client rest.Interface +} + +// newSeeds returns a Seeds +func newSeeds(c *CoreV1alpha1Client) *seeds { + return &seeds{ + client: c.RESTClient(), + } +} + +// Get takes name of the seed, and returns the corresponding seed object, and an error if there is any. +func (c *seeds) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Seed, err error) { + result = &v1alpha1.Seed{} + err = c.client.Get(). + Resource("seeds"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Seeds that match those selectors. +func (c *seeds) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.SeedList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.SeedList{} + err = c.client.Get(). + Resource("seeds"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested seeds. +func (c *seeds) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("seeds"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a seed and creates it. Returns the server's representation of the seed, and an error, if there is any. +func (c *seeds) Create(ctx context.Context, seed *v1alpha1.Seed, opts v1.CreateOptions) (result *v1alpha1.Seed, err error) { + result = &v1alpha1.Seed{} + err = c.client.Post(). + Resource("seeds"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(seed). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a seed and updates it. Returns the server's representation of the seed, and an error, if there is any. +func (c *seeds) Update(ctx context.Context, seed *v1alpha1.Seed, opts v1.UpdateOptions) (result *v1alpha1.Seed, err error) { + result = &v1alpha1.Seed{} + err = c.client.Put(). + Resource("seeds"). + Name(seed.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(seed). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. 
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *seeds) UpdateStatus(ctx context.Context, seed *v1alpha1.Seed, opts v1.UpdateOptions) (result *v1alpha1.Seed, err error) { + result = &v1alpha1.Seed{} + err = c.client.Put(). + Resource("seeds"). + Name(seed.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(seed). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the seed and deletes it. Returns an error if one occurs. +func (c *seeds) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Resource("seeds"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *seeds) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("seeds"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched seed. +func (c *seeds) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Seed, err error) { + result = &v1alpha1.Seed{} + err = c.client.Patch(pt). + Resource("seeds"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/shoot.go b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/shoot.go new file mode 100644 index 000000000..5ff105c72 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/shoot.go @@ -0,0 +1,195 @@ +/* +Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + "time" + + v1alpha1 "github.com/gardener/gardener/pkg/apis/core/v1alpha1" + scheme "github.com/gardener/gardener/pkg/client/core/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// ShootsGetter has a method to return a ShootInterface. +// A group's client should implement this interface. +type ShootsGetter interface { + Shoots(namespace string) ShootInterface +} + +// ShootInterface has methods to work with Shoot resources. 
+type ShootInterface interface { + Create(ctx context.Context, shoot *v1alpha1.Shoot, opts v1.CreateOptions) (*v1alpha1.Shoot, error) + Update(ctx context.Context, shoot *v1alpha1.Shoot, opts v1.UpdateOptions) (*v1alpha1.Shoot, error) + UpdateStatus(ctx context.Context, shoot *v1alpha1.Shoot, opts v1.UpdateOptions) (*v1alpha1.Shoot, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.Shoot, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ShootList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Shoot, err error) + ShootExpansion +} + +// shoots implements ShootInterface +type shoots struct { + client rest.Interface + ns string +} + +// newShoots returns a Shoots +func newShoots(c *CoreV1alpha1Client, namespace string) *shoots { + return &shoots{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the shoot, and returns the corresponding shoot object, and an error if there is any. +func (c *shoots) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Shoot, err error) { + result = &v1alpha1.Shoot{} + err = c.client.Get(). + Namespace(c.ns). + Resource("shoots"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Shoots that match those selectors. +func (c *shoots) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ShootList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.ShootList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("shoots"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested shoots. +func (c *shoots) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("shoots"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a shoot and creates it. Returns the server's representation of the shoot, and an error, if there is any. +func (c *shoots) Create(ctx context.Context, shoot *v1alpha1.Shoot, opts v1.CreateOptions) (result *v1alpha1.Shoot, err error) { + result = &v1alpha1.Shoot{} + err = c.client.Post(). + Namespace(c.ns). + Resource("shoots"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(shoot). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a shoot and updates it. Returns the server's representation of the shoot, and an error, if there is any. +func (c *shoots) Update(ctx context.Context, shoot *v1alpha1.Shoot, opts v1.UpdateOptions) (result *v1alpha1.Shoot, err error) { + result = &v1alpha1.Shoot{} + err = c.client.Put(). + Namespace(c.ns). + Resource("shoots"). + Name(shoot.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(shoot). 
+ Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *shoots) UpdateStatus(ctx context.Context, shoot *v1alpha1.Shoot, opts v1.UpdateOptions) (result *v1alpha1.Shoot, err error) { + result = &v1alpha1.Shoot{} + err = c.client.Put(). + Namespace(c.ns). + Resource("shoots"). + Name(shoot.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(shoot). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the shoot and deletes it. Returns an error if one occurs. +func (c *shoots) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("shoots"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *shoots) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("shoots"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched shoot. +func (c *shoots) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Shoot, err error) { + result = &v1alpha1.Shoot{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("shoots"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/shootstate.go b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/shootstate.go new file mode 100644 index 000000000..2a28e0a19 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1alpha1/shootstate.go @@ -0,0 +1,178 @@ +/* +Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + "time" + + v1alpha1 "github.com/gardener/gardener/pkg/apis/core/v1alpha1" + scheme "github.com/gardener/gardener/pkg/client/core/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// ShootStatesGetter has a method to return a ShootStateInterface. +// A group's client should implement this interface. 
+type ShootStatesGetter interface { + ShootStates(namespace string) ShootStateInterface +} + +// ShootStateInterface has methods to work with ShootState resources. +type ShootStateInterface interface { + Create(ctx context.Context, shootState *v1alpha1.ShootState, opts v1.CreateOptions) (*v1alpha1.ShootState, error) + Update(ctx context.Context, shootState *v1alpha1.ShootState, opts v1.UpdateOptions) (*v1alpha1.ShootState, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ShootState, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ShootStateList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ShootState, err error) + ShootStateExpansion +} + +// shootStates implements ShootStateInterface +type shootStates struct { + client rest.Interface + ns string +} + +// newShootStates returns a ShootStates +func newShootStates(c *CoreV1alpha1Client, namespace string) *shootStates { + return &shootStates{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the shootState, and returns the corresponding shootState object, and an error if there is any. +func (c *shootStates) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ShootState, err error) { + result = &v1alpha1.ShootState{} + err = c.client.Get(). + Namespace(c.ns). + Resource("shootstates"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ShootStates that match those selectors. +func (c *shootStates) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ShootStateList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.ShootStateList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("shootstates"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested shootStates. +func (c *shootStates) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("shootstates"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a shootState and creates it. Returns the server's representation of the shootState, and an error, if there is any. +func (c *shootStates) Create(ctx context.Context, shootState *v1alpha1.ShootState, opts v1.CreateOptions) (result *v1alpha1.ShootState, err error) { + result = &v1alpha1.ShootState{} + err = c.client.Post(). + Namespace(c.ns). + Resource("shootstates"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(shootState). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a shootState and updates it. Returns the server's representation of the shootState, and an error, if there is any. 
+func (c *shootStates) Update(ctx context.Context, shootState *v1alpha1.ShootState, opts v1.UpdateOptions) (result *v1alpha1.ShootState, err error) { + result = &v1alpha1.ShootState{} + err = c.client.Put(). + Namespace(c.ns). + Resource("shootstates"). + Name(shootState.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(shootState). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the shootState and deletes it. Returns an error if one occurs. +func (c *shootStates) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("shootstates"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *shootStates) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("shootstates"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched shootState. +func (c *shootStates) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ShootState, err error) { + result = &v1alpha1.ShootState{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("shootstates"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/backupbucket.go b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/backupbucket.go new file mode 100644 index 000000000..a7238c9a6 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/backupbucket.go @@ -0,0 +1,184 @@ +/* +Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "context" + "time" + + v1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1" + scheme "github.com/gardener/gardener/pkg/client/core/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// BackupBucketsGetter has a method to return a BackupBucketInterface. +// A group's client should implement this interface. +type BackupBucketsGetter interface { + BackupBuckets() BackupBucketInterface +} + +// BackupBucketInterface has methods to work with BackupBucket resources. 
+type BackupBucketInterface interface { + Create(ctx context.Context, backupBucket *v1beta1.BackupBucket, opts v1.CreateOptions) (*v1beta1.BackupBucket, error) + Update(ctx context.Context, backupBucket *v1beta1.BackupBucket, opts v1.UpdateOptions) (*v1beta1.BackupBucket, error) + UpdateStatus(ctx context.Context, backupBucket *v1beta1.BackupBucket, opts v1.UpdateOptions) (*v1beta1.BackupBucket, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.BackupBucket, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.BackupBucketList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.BackupBucket, err error) + BackupBucketExpansion +} + +// backupBuckets implements BackupBucketInterface +type backupBuckets struct { + client rest.Interface +} + +// newBackupBuckets returns a BackupBuckets +func newBackupBuckets(c *CoreV1beta1Client) *backupBuckets { + return &backupBuckets{ + client: c.RESTClient(), + } +} + +// Get takes name of the backupBucket, and returns the corresponding backupBucket object, and an error if there is any. +func (c *backupBuckets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.BackupBucket, err error) { + result = &v1beta1.BackupBucket{} + err = c.client.Get(). + Resource("backupbuckets"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of BackupBuckets that match those selectors. +func (c *backupBuckets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.BackupBucketList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.BackupBucketList{} + err = c.client.Get(). + Resource("backupbuckets"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested backupBuckets. +func (c *backupBuckets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("backupbuckets"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a backupBucket and creates it. Returns the server's representation of the backupBucket, and an error, if there is any. +func (c *backupBuckets) Create(ctx context.Context, backupBucket *v1beta1.BackupBucket, opts v1.CreateOptions) (result *v1beta1.BackupBucket, err error) { + result = &v1beta1.BackupBucket{} + err = c.client.Post(). + Resource("backupbuckets"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(backupBucket). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a backupBucket and updates it. Returns the server's representation of the backupBucket, and an error, if there is any. 
+func (c *backupBuckets) Update(ctx context.Context, backupBucket *v1beta1.BackupBucket, opts v1.UpdateOptions) (result *v1beta1.BackupBucket, err error) { + result = &v1beta1.BackupBucket{} + err = c.client.Put(). + Resource("backupbuckets"). + Name(backupBucket.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(backupBucket). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *backupBuckets) UpdateStatus(ctx context.Context, backupBucket *v1beta1.BackupBucket, opts v1.UpdateOptions) (result *v1beta1.BackupBucket, err error) { + result = &v1beta1.BackupBucket{} + err = c.client.Put(). + Resource("backupbuckets"). + Name(backupBucket.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(backupBucket). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the backupBucket and deletes it. Returns an error if one occurs. +func (c *backupBuckets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Resource("backupbuckets"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *backupBuckets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("backupbuckets"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched backupBucket. +func (c *backupBuckets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.BackupBucket, err error) { + result = &v1beta1.BackupBucket{} + err = c.client.Patch(pt). + Resource("backupbuckets"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/backupentry.go b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/backupentry.go new file mode 100644 index 000000000..13880f074 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/backupentry.go @@ -0,0 +1,195 @@ +/* +Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1beta1 + +import ( + "context" + "time" + + v1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1" + scheme "github.com/gardener/gardener/pkg/client/core/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// BackupEntriesGetter has a method to return a BackupEntryInterface. +// A group's client should implement this interface. +type BackupEntriesGetter interface { + BackupEntries(namespace string) BackupEntryInterface +} + +// BackupEntryInterface has methods to work with BackupEntry resources. +type BackupEntryInterface interface { + Create(ctx context.Context, backupEntry *v1beta1.BackupEntry, opts v1.CreateOptions) (*v1beta1.BackupEntry, error) + Update(ctx context.Context, backupEntry *v1beta1.BackupEntry, opts v1.UpdateOptions) (*v1beta1.BackupEntry, error) + UpdateStatus(ctx context.Context, backupEntry *v1beta1.BackupEntry, opts v1.UpdateOptions) (*v1beta1.BackupEntry, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.BackupEntry, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.BackupEntryList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.BackupEntry, err error) + BackupEntryExpansion +} + +// backupEntries implements BackupEntryInterface +type backupEntries struct { + client rest.Interface + ns string +} + +// newBackupEntries returns a BackupEntries +func newBackupEntries(c *CoreV1beta1Client, namespace string) *backupEntries { + return &backupEntries{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the backupEntry, and returns the corresponding backupEntry object, and an error if there is any. +func (c *backupEntries) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.BackupEntry, err error) { + result = &v1beta1.BackupEntry{} + err = c.client.Get(). + Namespace(c.ns). + Resource("backupentries"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of BackupEntries that match those selectors. +func (c *backupEntries) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.BackupEntryList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.BackupEntryList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("backupentries"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested backupEntries. +func (c *backupEntries) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("backupentries"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). 
+ Watch(ctx) +} + +// Create takes the representation of a backupEntry and creates it. Returns the server's representation of the backupEntry, and an error, if there is any. +func (c *backupEntries) Create(ctx context.Context, backupEntry *v1beta1.BackupEntry, opts v1.CreateOptions) (result *v1beta1.BackupEntry, err error) { + result = &v1beta1.BackupEntry{} + err = c.client.Post(). + Namespace(c.ns). + Resource("backupentries"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(backupEntry). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a backupEntry and updates it. Returns the server's representation of the backupEntry, and an error, if there is any. +func (c *backupEntries) Update(ctx context.Context, backupEntry *v1beta1.BackupEntry, opts v1.UpdateOptions) (result *v1beta1.BackupEntry, err error) { + result = &v1beta1.BackupEntry{} + err = c.client.Put(). + Namespace(c.ns). + Resource("backupentries"). + Name(backupEntry.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(backupEntry). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *backupEntries) UpdateStatus(ctx context.Context, backupEntry *v1beta1.BackupEntry, opts v1.UpdateOptions) (result *v1beta1.BackupEntry, err error) { + result = &v1beta1.BackupEntry{} + err = c.client.Put(). + Namespace(c.ns). + Resource("backupentries"). + Name(backupEntry.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(backupEntry). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the backupEntry and deletes it. Returns an error if one occurs. +func (c *backupEntries) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("backupentries"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *backupEntries) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("backupentries"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched backupEntry. +func (c *backupEntries) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.BackupEntry, err error) { + result = &v1beta1.BackupEntry{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("backupentries"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/cloudprofile.go b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/cloudprofile.go new file mode 100644 index 000000000..37b7cab27 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/cloudprofile.go @@ -0,0 +1,168 @@ +/* +Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. 
This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "context" + "time" + + v1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1" + scheme "github.com/gardener/gardener/pkg/client/core/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// CloudProfilesGetter has a method to return a CloudProfileInterface. +// A group's client should implement this interface. +type CloudProfilesGetter interface { + CloudProfiles() CloudProfileInterface +} + +// CloudProfileInterface has methods to work with CloudProfile resources. +type CloudProfileInterface interface { + Create(ctx context.Context, cloudProfile *v1beta1.CloudProfile, opts v1.CreateOptions) (*v1beta1.CloudProfile, error) + Update(ctx context.Context, cloudProfile *v1beta1.CloudProfile, opts v1.UpdateOptions) (*v1beta1.CloudProfile, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.CloudProfile, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.CloudProfileList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CloudProfile, err error) + CloudProfileExpansion +} + +// cloudProfiles implements CloudProfileInterface +type cloudProfiles struct { + client rest.Interface +} + +// newCloudProfiles returns a CloudProfiles +func newCloudProfiles(c *CoreV1beta1Client) *cloudProfiles { + return &cloudProfiles{ + client: c.RESTClient(), + } +} + +// Get takes name of the cloudProfile, and returns the corresponding cloudProfile object, and an error if there is any. +func (c *cloudProfiles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CloudProfile, err error) { + result = &v1beta1.CloudProfile{} + err = c.client.Get(). + Resource("cloudprofiles"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of CloudProfiles that match those selectors. +func (c *cloudProfiles) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CloudProfileList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.CloudProfileList{} + err = c.client.Get(). + Resource("cloudprofiles"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). 
+ Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested cloudProfiles. +func (c *cloudProfiles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("cloudprofiles"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a cloudProfile and creates it. Returns the server's representation of the cloudProfile, and an error, if there is any. +func (c *cloudProfiles) Create(ctx context.Context, cloudProfile *v1beta1.CloudProfile, opts v1.CreateOptions) (result *v1beta1.CloudProfile, err error) { + result = &v1beta1.CloudProfile{} + err = c.client.Post(). + Resource("cloudprofiles"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(cloudProfile). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a cloudProfile and updates it. Returns the server's representation of the cloudProfile, and an error, if there is any. +func (c *cloudProfiles) Update(ctx context.Context, cloudProfile *v1beta1.CloudProfile, opts v1.UpdateOptions) (result *v1beta1.CloudProfile, err error) { + result = &v1beta1.CloudProfile{} + err = c.client.Put(). + Resource("cloudprofiles"). + Name(cloudProfile.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(cloudProfile). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the cloudProfile and deletes it. Returns an error if one occurs. +func (c *cloudProfiles) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Resource("cloudprofiles"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *cloudProfiles) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("cloudprofiles"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched cloudProfile. +func (c *cloudProfiles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CloudProfile, err error) { + result = &v1beta1.CloudProfile{} + err = c.client.Patch(pt). + Resource("cloudprofiles"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/controllerinstallation.go b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/controllerinstallation.go new file mode 100644 index 000000000..5756e8467 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/controllerinstallation.go @@ -0,0 +1,184 @@ +/* +Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 
2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "context" + "time" + + v1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1" + scheme "github.com/gardener/gardener/pkg/client/core/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// ControllerInstallationsGetter has a method to return a ControllerInstallationInterface. +// A group's client should implement this interface. +type ControllerInstallationsGetter interface { + ControllerInstallations() ControllerInstallationInterface +} + +// ControllerInstallationInterface has methods to work with ControllerInstallation resources. +type ControllerInstallationInterface interface { + Create(ctx context.Context, controllerInstallation *v1beta1.ControllerInstallation, opts v1.CreateOptions) (*v1beta1.ControllerInstallation, error) + Update(ctx context.Context, controllerInstallation *v1beta1.ControllerInstallation, opts v1.UpdateOptions) (*v1beta1.ControllerInstallation, error) + UpdateStatus(ctx context.Context, controllerInstallation *v1beta1.ControllerInstallation, opts v1.UpdateOptions) (*v1beta1.ControllerInstallation, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.ControllerInstallation, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.ControllerInstallationList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ControllerInstallation, err error) + ControllerInstallationExpansion +} + +// controllerInstallations implements ControllerInstallationInterface +type controllerInstallations struct { + client rest.Interface +} + +// newControllerInstallations returns a ControllerInstallations +func newControllerInstallations(c *CoreV1beta1Client) *controllerInstallations { + return &controllerInstallations{ + client: c.RESTClient(), + } +} + +// Get takes name of the controllerInstallation, and returns the corresponding controllerInstallation object, and an error if there is any. +func (c *controllerInstallations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ControllerInstallation, err error) { + result = &v1beta1.ControllerInstallation{} + err = c.client.Get(). + Resource("controllerinstallations"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ControllerInstallations that match those selectors. 
+func (c *controllerInstallations) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ControllerInstallationList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.ControllerInstallationList{} + err = c.client.Get(). + Resource("controllerinstallations"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested controllerInstallations. +func (c *controllerInstallations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("controllerinstallations"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a controllerInstallation and creates it. Returns the server's representation of the controllerInstallation, and an error, if there is any. +func (c *controllerInstallations) Create(ctx context.Context, controllerInstallation *v1beta1.ControllerInstallation, opts v1.CreateOptions) (result *v1beta1.ControllerInstallation, err error) { + result = &v1beta1.ControllerInstallation{} + err = c.client.Post(). + Resource("controllerinstallations"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(controllerInstallation). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a controllerInstallation and updates it. Returns the server's representation of the controllerInstallation, and an error, if there is any. +func (c *controllerInstallations) Update(ctx context.Context, controllerInstallation *v1beta1.ControllerInstallation, opts v1.UpdateOptions) (result *v1beta1.ControllerInstallation, err error) { + result = &v1beta1.ControllerInstallation{} + err = c.client.Put(). + Resource("controllerinstallations"). + Name(controllerInstallation.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(controllerInstallation). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *controllerInstallations) UpdateStatus(ctx context.Context, controllerInstallation *v1beta1.ControllerInstallation, opts v1.UpdateOptions) (result *v1beta1.ControllerInstallation, err error) { + result = &v1beta1.ControllerInstallation{} + err = c.client.Put(). + Resource("controllerinstallations"). + Name(controllerInstallation.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(controllerInstallation). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the controllerInstallation and deletes it. Returns an error if one occurs. +func (c *controllerInstallations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Resource("controllerinstallations"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. 
+func (c *controllerInstallations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("controllerinstallations"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched controllerInstallation. +func (c *controllerInstallations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ControllerInstallation, err error) { + result = &v1beta1.ControllerInstallation{} + err = c.client.Patch(pt). + Resource("controllerinstallations"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/controllerregistration.go b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/controllerregistration.go new file mode 100644 index 000000000..c7f66b209 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/controllerregistration.go @@ -0,0 +1,168 @@ +/* +Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "context" + "time" + + v1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1" + scheme "github.com/gardener/gardener/pkg/client/core/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// ControllerRegistrationsGetter has a method to return a ControllerRegistrationInterface. +// A group's client should implement this interface. +type ControllerRegistrationsGetter interface { + ControllerRegistrations() ControllerRegistrationInterface +} + +// ControllerRegistrationInterface has methods to work with ControllerRegistration resources. 
+type ControllerRegistrationInterface interface { + Create(ctx context.Context, controllerRegistration *v1beta1.ControllerRegistration, opts v1.CreateOptions) (*v1beta1.ControllerRegistration, error) + Update(ctx context.Context, controllerRegistration *v1beta1.ControllerRegistration, opts v1.UpdateOptions) (*v1beta1.ControllerRegistration, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.ControllerRegistration, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.ControllerRegistrationList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ControllerRegistration, err error) + ControllerRegistrationExpansion +} + +// controllerRegistrations implements ControllerRegistrationInterface +type controllerRegistrations struct { + client rest.Interface +} + +// newControllerRegistrations returns a ControllerRegistrations +func newControllerRegistrations(c *CoreV1beta1Client) *controllerRegistrations { + return &controllerRegistrations{ + client: c.RESTClient(), + } +} + +// Get takes name of the controllerRegistration, and returns the corresponding controllerRegistration object, and an error if there is any. +func (c *controllerRegistrations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ControllerRegistration, err error) { + result = &v1beta1.ControllerRegistration{} + err = c.client.Get(). + Resource("controllerregistrations"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ControllerRegistrations that match those selectors. +func (c *controllerRegistrations) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ControllerRegistrationList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.ControllerRegistrationList{} + err = c.client.Get(). + Resource("controllerregistrations"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested controllerRegistrations. +func (c *controllerRegistrations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("controllerregistrations"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a controllerRegistration and creates it. Returns the server's representation of the controllerRegistration, and an error, if there is any. +func (c *controllerRegistrations) Create(ctx context.Context, controllerRegistration *v1beta1.ControllerRegistration, opts v1.CreateOptions) (result *v1beta1.ControllerRegistration, err error) { + result = &v1beta1.ControllerRegistration{} + err = c.client.Post(). + Resource("controllerregistrations"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(controllerRegistration). + Do(ctx). 
+ Into(result) + return +} + +// Update takes the representation of a controllerRegistration and updates it. Returns the server's representation of the controllerRegistration, and an error, if there is any. +func (c *controllerRegistrations) Update(ctx context.Context, controllerRegistration *v1beta1.ControllerRegistration, opts v1.UpdateOptions) (result *v1beta1.ControllerRegistration, err error) { + result = &v1beta1.ControllerRegistration{} + err = c.client.Put(). + Resource("controllerregistrations"). + Name(controllerRegistration.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(controllerRegistration). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the controllerRegistration and deletes it. Returns an error if one occurs. +func (c *controllerRegistrations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Resource("controllerregistrations"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *controllerRegistrations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("controllerregistrations"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched controllerRegistration. +func (c *controllerRegistrations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ControllerRegistration, err error) { + result = &v1beta1.ControllerRegistration{} + err = c.client.Patch(pt). + Resource("controllerregistrations"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/core_client.go b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/core_client.go new file mode 100644 index 000000000..01ada3d78 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/core_client.go @@ -0,0 +1,139 @@ +/* +Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1beta1 + +import ( + v1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1" + "github.com/gardener/gardener/pkg/client/core/clientset/versioned/scheme" + rest "k8s.io/client-go/rest" +) + +type CoreV1beta1Interface interface { + RESTClient() rest.Interface + BackupBucketsGetter + BackupEntriesGetter + CloudProfilesGetter + ControllerInstallationsGetter + ControllerRegistrationsGetter + PlantsGetter + ProjectsGetter + QuotasGetter + SecretBindingsGetter + SeedsGetter + ShootsGetter +} + +// CoreV1beta1Client is used to interact with features provided by the core.gardener.cloud group. +type CoreV1beta1Client struct { + restClient rest.Interface +} + +func (c *CoreV1beta1Client) BackupBuckets() BackupBucketInterface { + return newBackupBuckets(c) +} + +func (c *CoreV1beta1Client) BackupEntries(namespace string) BackupEntryInterface { + return newBackupEntries(c, namespace) +} + +func (c *CoreV1beta1Client) CloudProfiles() CloudProfileInterface { + return newCloudProfiles(c) +} + +func (c *CoreV1beta1Client) ControllerInstallations() ControllerInstallationInterface { + return newControllerInstallations(c) +} + +func (c *CoreV1beta1Client) ControllerRegistrations() ControllerRegistrationInterface { + return newControllerRegistrations(c) +} + +func (c *CoreV1beta1Client) Plants(namespace string) PlantInterface { + return newPlants(c, namespace) +} + +func (c *CoreV1beta1Client) Projects() ProjectInterface { + return newProjects(c) +} + +func (c *CoreV1beta1Client) Quotas(namespace string) QuotaInterface { + return newQuotas(c, namespace) +} + +func (c *CoreV1beta1Client) SecretBindings(namespace string) SecretBindingInterface { + return newSecretBindings(c, namespace) +} + +func (c *CoreV1beta1Client) Seeds() SeedInterface { + return newSeeds(c) +} + +func (c *CoreV1beta1Client) Shoots(namespace string) ShootInterface { + return newShoots(c, namespace) +} + +// NewForConfig creates a new CoreV1beta1Client for the given config. +func NewForConfig(c *rest.Config) (*CoreV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &CoreV1beta1Client{client}, nil +} + +// NewForConfigOrDie creates a new CoreV1beta1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *CoreV1beta1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new CoreV1beta1Client for the given RESTClient. +func New(c rest.Interface) *CoreV1beta1Client { + return &CoreV1beta1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *CoreV1beta1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/doc.go b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/doc.go new file mode 100644 index 000000000..11866df3c --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1beta1 diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/generated_expansion.go b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/generated_expansion.go new file mode 100644 index 000000000..6ca0f943f --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/generated_expansion.go @@ -0,0 +1,41 @@ +/* +Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +type BackupBucketExpansion interface{} + +type BackupEntryExpansion interface{} + +type CloudProfileExpansion interface{} + +type ControllerInstallationExpansion interface{} + +type ControllerRegistrationExpansion interface{} + +type PlantExpansion interface{} + +type ProjectExpansion interface{} + +type QuotaExpansion interface{} + +type SecretBindingExpansion interface{} + +type SeedExpansion interface{} + +type ShootExpansion interface{} diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/plant.go b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/plant.go new file mode 100644 index 000000000..1beb236a0 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/plant.go @@ -0,0 +1,195 @@ +/* +Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. 
This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "context" + "time" + + v1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1" + scheme "github.com/gardener/gardener/pkg/client/core/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// PlantsGetter has a method to return a PlantInterface. +// A group's client should implement this interface. +type PlantsGetter interface { + Plants(namespace string) PlantInterface +} + +// PlantInterface has methods to work with Plant resources. +type PlantInterface interface { + Create(ctx context.Context, plant *v1beta1.Plant, opts v1.CreateOptions) (*v1beta1.Plant, error) + Update(ctx context.Context, plant *v1beta1.Plant, opts v1.UpdateOptions) (*v1beta1.Plant, error) + UpdateStatus(ctx context.Context, plant *v1beta1.Plant, opts v1.UpdateOptions) (*v1beta1.Plant, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.Plant, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.PlantList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Plant, err error) + PlantExpansion +} + +// plants implements PlantInterface +type plants struct { + client rest.Interface + ns string +} + +// newPlants returns a Plants +func newPlants(c *CoreV1beta1Client, namespace string) *plants { + return &plants{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the plant, and returns the corresponding plant object, and an error if there is any. +func (c *plants) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Plant, err error) { + result = &v1beta1.Plant{} + err = c.client.Get(). + Namespace(c.ns). + Resource("plants"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Plants that match those selectors. +func (c *plants) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.PlantList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.PlantList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("plants"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested plants. 
+func (c *plants) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("plants"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a plant and creates it. Returns the server's representation of the plant, and an error, if there is any. +func (c *plants) Create(ctx context.Context, plant *v1beta1.Plant, opts v1.CreateOptions) (result *v1beta1.Plant, err error) { + result = &v1beta1.Plant{} + err = c.client.Post(). + Namespace(c.ns). + Resource("plants"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(plant). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a plant and updates it. Returns the server's representation of the plant, and an error, if there is any. +func (c *plants) Update(ctx context.Context, plant *v1beta1.Plant, opts v1.UpdateOptions) (result *v1beta1.Plant, err error) { + result = &v1beta1.Plant{} + err = c.client.Put(). + Namespace(c.ns). + Resource("plants"). + Name(plant.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(plant). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *plants) UpdateStatus(ctx context.Context, plant *v1beta1.Plant, opts v1.UpdateOptions) (result *v1beta1.Plant, err error) { + result = &v1beta1.Plant{} + err = c.client.Put(). + Namespace(c.ns). + Resource("plants"). + Name(plant.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(plant). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the plant and deletes it. Returns an error if one occurs. +func (c *plants) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("plants"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *plants) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("plants"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched plant. +func (c *plants) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Plant, err error) { + result = &v1beta1.Plant{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("plants"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). 
+ Into(result) + return +} diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/project.go b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/project.go new file mode 100644 index 000000000..b0d08d49c --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/project.go @@ -0,0 +1,184 @@ +/* +Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "context" + "time" + + v1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1" + scheme "github.com/gardener/gardener/pkg/client/core/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// ProjectsGetter has a method to return a ProjectInterface. +// A group's client should implement this interface. +type ProjectsGetter interface { + Projects() ProjectInterface +} + +// ProjectInterface has methods to work with Project resources. +type ProjectInterface interface { + Create(ctx context.Context, project *v1beta1.Project, opts v1.CreateOptions) (*v1beta1.Project, error) + Update(ctx context.Context, project *v1beta1.Project, opts v1.UpdateOptions) (*v1beta1.Project, error) + UpdateStatus(ctx context.Context, project *v1beta1.Project, opts v1.UpdateOptions) (*v1beta1.Project, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.Project, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.ProjectList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Project, err error) + ProjectExpansion +} + +// projects implements ProjectInterface +type projects struct { + client rest.Interface +} + +// newProjects returns a Projects +func newProjects(c *CoreV1beta1Client) *projects { + return &projects{ + client: c.RESTClient(), + } +} + +// Get takes name of the project, and returns the corresponding project object, and an error if there is any. +func (c *projects) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Project, err error) { + result = &v1beta1.Project{} + err = c.client.Get(). + Resource("projects"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Projects that match those selectors. 
+func (c *projects) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ProjectList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.ProjectList{} + err = c.client.Get(). + Resource("projects"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested projects. +func (c *projects) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("projects"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a project and creates it. Returns the server's representation of the project, and an error, if there is any. +func (c *projects) Create(ctx context.Context, project *v1beta1.Project, opts v1.CreateOptions) (result *v1beta1.Project, err error) { + result = &v1beta1.Project{} + err = c.client.Post(). + Resource("projects"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(project). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a project and updates it. Returns the server's representation of the project, and an error, if there is any. +func (c *projects) Update(ctx context.Context, project *v1beta1.Project, opts v1.UpdateOptions) (result *v1beta1.Project, err error) { + result = &v1beta1.Project{} + err = c.client.Put(). + Resource("projects"). + Name(project.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(project). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *projects) UpdateStatus(ctx context.Context, project *v1beta1.Project, opts v1.UpdateOptions) (result *v1beta1.Project, err error) { + result = &v1beta1.Project{} + err = c.client.Put(). + Resource("projects"). + Name(project.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(project). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the project and deletes it. Returns an error if one occurs. +func (c *projects) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Resource("projects"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *projects) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("projects"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched project. +func (c *projects) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Project, err error) { + result = &v1beta1.Project{} + err = c.client.Patch(pt). + Resource("projects"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). 
+		Body(data).
+		Do(ctx).
+		Into(result)
+	return
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/quota.go b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/quota.go
new file mode 100644
index 000000000..73aef2bea
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/quota.go
@@ -0,0 +1,178 @@
+/*
+Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	"context"
+	"time"
+
+	v1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
+	scheme "github.com/gardener/gardener/pkg/client/core/clientset/versioned/scheme"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	rest "k8s.io/client-go/rest"
+)
+
+// QuotasGetter has a method to return a QuotaInterface.
+// A group's client should implement this interface.
+type QuotasGetter interface {
+	Quotas(namespace string) QuotaInterface
+}
+
+// QuotaInterface has methods to work with Quota resources.
+type QuotaInterface interface {
+	Create(ctx context.Context, quota *v1beta1.Quota, opts v1.CreateOptions) (*v1beta1.Quota, error)
+	Update(ctx context.Context, quota *v1beta1.Quota, opts v1.UpdateOptions) (*v1beta1.Quota, error)
+	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.Quota, error)
+	List(ctx context.Context, opts v1.ListOptions) (*v1beta1.QuotaList, error)
+	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Quota, err error)
+	QuotaExpansion
+}
+
+// quotas implements QuotaInterface
+type quotas struct {
+	client rest.Interface
+	ns     string
+}
+
+// newQuotas returns a Quotas
+func newQuotas(c *CoreV1beta1Client, namespace string) *quotas {
+	return &quotas{
+		client: c.RESTClient(),
+		ns:     namespace,
+	}
+}
+
+// Get takes name of the quota, and returns the corresponding quota object, and an error if there is any.
+func (c *quotas) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Quota, err error) {
+	result = &v1beta1.Quota{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("quotas").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do(ctx).
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of Quotas that match those selectors.
+func (c *quotas) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.QuotaList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.QuotaList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("quotas"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested quotas. +func (c *quotas) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("quotas"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a quota and creates it. Returns the server's representation of the quota, and an error, if there is any. +func (c *quotas) Create(ctx context.Context, quota *v1beta1.Quota, opts v1.CreateOptions) (result *v1beta1.Quota, err error) { + result = &v1beta1.Quota{} + err = c.client.Post(). + Namespace(c.ns). + Resource("quotas"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(quota). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a quota and updates it. Returns the server's representation of the quota, and an error, if there is any. +func (c *quotas) Update(ctx context.Context, quota *v1beta1.Quota, opts v1.UpdateOptions) (result *v1beta1.Quota, err error) { + result = &v1beta1.Quota{} + err = c.client.Put(). + Namespace(c.ns). + Resource("quotas"). + Name(quota.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(quota). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the quota and deletes it. Returns an error if one occurs. +func (c *quotas) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("quotas"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *quotas) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("quotas"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched quota. +func (c *quotas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Quota, err error) { + result = &v1beta1.Quota{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("quotas"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). 
+ Into(result) + return +} diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/secretbinding.go b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/secretbinding.go new file mode 100644 index 000000000..953846e93 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/secretbinding.go @@ -0,0 +1,178 @@ +/* +Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "context" + "time" + + v1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1" + scheme "github.com/gardener/gardener/pkg/client/core/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// SecretBindingsGetter has a method to return a SecretBindingInterface. +// A group's client should implement this interface. +type SecretBindingsGetter interface { + SecretBindings(namespace string) SecretBindingInterface +} + +// SecretBindingInterface has methods to work with SecretBinding resources. +type SecretBindingInterface interface { + Create(ctx context.Context, secretBinding *v1beta1.SecretBinding, opts v1.CreateOptions) (*v1beta1.SecretBinding, error) + Update(ctx context.Context, secretBinding *v1beta1.SecretBinding, opts v1.UpdateOptions) (*v1beta1.SecretBinding, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.SecretBinding, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.SecretBindingList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.SecretBinding, err error) + SecretBindingExpansion +} + +// secretBindings implements SecretBindingInterface +type secretBindings struct { + client rest.Interface + ns string +} + +// newSecretBindings returns a SecretBindings +func newSecretBindings(c *CoreV1beta1Client, namespace string) *secretBindings { + return &secretBindings{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the secretBinding, and returns the corresponding secretBinding object, and an error if there is any. +func (c *secretBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.SecretBinding, err error) { + result = &v1beta1.SecretBinding{} + err = c.client.Get(). + Namespace(c.ns). + Resource("secretbindings"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). 
+ Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of SecretBindings that match those selectors. +func (c *secretBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.SecretBindingList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.SecretBindingList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("secretbindings"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested secretBindings. +func (c *secretBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("secretbindings"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a secretBinding and creates it. Returns the server's representation of the secretBinding, and an error, if there is any. +func (c *secretBindings) Create(ctx context.Context, secretBinding *v1beta1.SecretBinding, opts v1.CreateOptions) (result *v1beta1.SecretBinding, err error) { + result = &v1beta1.SecretBinding{} + err = c.client.Post(). + Namespace(c.ns). + Resource("secretbindings"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(secretBinding). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a secretBinding and updates it. Returns the server's representation of the secretBinding, and an error, if there is any. +func (c *secretBindings) Update(ctx context.Context, secretBinding *v1beta1.SecretBinding, opts v1.UpdateOptions) (result *v1beta1.SecretBinding, err error) { + result = &v1beta1.SecretBinding{} + err = c.client.Put(). + Namespace(c.ns). + Resource("secretbindings"). + Name(secretBinding.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(secretBinding). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the secretBinding and deletes it. Returns an error if one occurs. +func (c *secretBindings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("secretbindings"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *secretBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("secretbindings"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched secretBinding. +func (c *secretBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.SecretBinding, err error) { + result = &v1beta1.SecretBinding{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("secretbindings"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). 
+ Into(result) + return +} diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/seed.go b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/seed.go new file mode 100644 index 000000000..05b1df257 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/seed.go @@ -0,0 +1,184 @@ +/* +Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "context" + "time" + + v1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1" + scheme "github.com/gardener/gardener/pkg/client/core/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// SeedsGetter has a method to return a SeedInterface. +// A group's client should implement this interface. +type SeedsGetter interface { + Seeds() SeedInterface +} + +// SeedInterface has methods to work with Seed resources. +type SeedInterface interface { + Create(ctx context.Context, seed *v1beta1.Seed, opts v1.CreateOptions) (*v1beta1.Seed, error) + Update(ctx context.Context, seed *v1beta1.Seed, opts v1.UpdateOptions) (*v1beta1.Seed, error) + UpdateStatus(ctx context.Context, seed *v1beta1.Seed, opts v1.UpdateOptions) (*v1beta1.Seed, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.Seed, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.SeedList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Seed, err error) + SeedExpansion +} + +// seeds implements SeedInterface +type seeds struct { + client rest.Interface +} + +// newSeeds returns a Seeds +func newSeeds(c *CoreV1beta1Client) *seeds { + return &seeds{ + client: c.RESTClient(), + } +} + +// Get takes name of the seed, and returns the corresponding seed object, and an error if there is any. +func (c *seeds) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Seed, err error) { + result = &v1beta1.Seed{} + err = c.client.Get(). + Resource("seeds"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Seeds that match those selectors. 
+func (c *seeds) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.SeedList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.SeedList{} + err = c.client.Get(). + Resource("seeds"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested seeds. +func (c *seeds) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("seeds"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a seed and creates it. Returns the server's representation of the seed, and an error, if there is any. +func (c *seeds) Create(ctx context.Context, seed *v1beta1.Seed, opts v1.CreateOptions) (result *v1beta1.Seed, err error) { + result = &v1beta1.Seed{} + err = c.client.Post(). + Resource("seeds"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(seed). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a seed and updates it. Returns the server's representation of the seed, and an error, if there is any. +func (c *seeds) Update(ctx context.Context, seed *v1beta1.Seed, opts v1.UpdateOptions) (result *v1beta1.Seed, err error) { + result = &v1beta1.Seed{} + err = c.client.Put(). + Resource("seeds"). + Name(seed.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(seed). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *seeds) UpdateStatus(ctx context.Context, seed *v1beta1.Seed, opts v1.UpdateOptions) (result *v1beta1.Seed, err error) { + result = &v1beta1.Seed{} + err = c.client.Put(). + Resource("seeds"). + Name(seed.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(seed). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the seed and deletes it. Returns an error if one occurs. +func (c *seeds) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Resource("seeds"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *seeds) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("seeds"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched seed. +func (c *seeds) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Seed, err error) { + result = &v1beta1.Seed{} + err = c.client.Patch(pt). + Resource("seeds"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). 
+ Into(result) + return +} diff --git a/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/shoot.go b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/shoot.go new file mode 100644 index 000000000..b9c904c65 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/client/core/clientset/versioned/typed/core/v1beta1/shoot.go @@ -0,0 +1,195 @@ +/* +Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "context" + "time" + + v1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1" + scheme "github.com/gardener/gardener/pkg/client/core/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// ShootsGetter has a method to return a ShootInterface. +// A group's client should implement this interface. +type ShootsGetter interface { + Shoots(namespace string) ShootInterface +} + +// ShootInterface has methods to work with Shoot resources. +type ShootInterface interface { + Create(ctx context.Context, shoot *v1beta1.Shoot, opts v1.CreateOptions) (*v1beta1.Shoot, error) + Update(ctx context.Context, shoot *v1beta1.Shoot, opts v1.UpdateOptions) (*v1beta1.Shoot, error) + UpdateStatus(ctx context.Context, shoot *v1beta1.Shoot, opts v1.UpdateOptions) (*v1beta1.Shoot, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.Shoot, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.ShootList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Shoot, err error) + ShootExpansion +} + +// shoots implements ShootInterface +type shoots struct { + client rest.Interface + ns string +} + +// newShoots returns a Shoots +func newShoots(c *CoreV1beta1Client, namespace string) *shoots { + return &shoots{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the shoot, and returns the corresponding shoot object, and an error if there is any. +func (c *shoots) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Shoot, err error) { + result = &v1beta1.Shoot{} + err = c.client.Get(). + Namespace(c.ns). + Resource("shoots"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Shoots that match those selectors. 
+func (c *shoots) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ShootList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.ShootList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("shoots"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested shoots. +func (c *shoots) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("shoots"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a shoot and creates it. Returns the server's representation of the shoot, and an error, if there is any. +func (c *shoots) Create(ctx context.Context, shoot *v1beta1.Shoot, opts v1.CreateOptions) (result *v1beta1.Shoot, err error) { + result = &v1beta1.Shoot{} + err = c.client.Post(). + Namespace(c.ns). + Resource("shoots"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(shoot). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a shoot and updates it. Returns the server's representation of the shoot, and an error, if there is any. +func (c *shoots) Update(ctx context.Context, shoot *v1beta1.Shoot, opts v1.UpdateOptions) (result *v1beta1.Shoot, err error) { + result = &v1beta1.Shoot{} + err = c.client.Put(). + Namespace(c.ns). + Resource("shoots"). + Name(shoot.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(shoot). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *shoots) UpdateStatus(ctx context.Context, shoot *v1beta1.Shoot, opts v1.UpdateOptions) (result *v1beta1.Shoot, err error) { + result = &v1beta1.Shoot{} + err = c.client.Put(). + Namespace(c.ns). + Resource("shoots"). + Name(shoot.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(shoot). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the shoot and deletes it. Returns an error if one occurs. +func (c *shoots) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("shoots"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *shoots) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("shoots"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched shoot. +func (c *shoots) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Shoot, err error) { + result = &v1beta1.Shoot{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("shoots"). + Name(name). 
+ SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/gardener/gardener/pkg/client/extensions/clientset/versioned/scheme/doc.go b/vendor/github.com/gardener/gardener/pkg/client/extensions/clientset/versioned/scheme/doc.go new file mode 100644 index 000000000..7d4fb776b --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/client/extensions/clientset/versioned/scheme/doc.go @@ -0,0 +1,20 @@ +/* +Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package contains the scheme of the automatically generated clientset. +package scheme diff --git a/vendor/github.com/gardener/gardener/pkg/client/extensions/clientset/versioned/scheme/register.go b/vendor/github.com/gardener/gardener/pkg/client/extensions/clientset/versioned/scheme/register.go new file mode 100644 index 000000000..22a1acecf --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/client/extensions/clientset/versioned/scheme/register.go @@ -0,0 +1,56 @@ +/* +Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package scheme + +import ( + extensionsv1alpha1 "github.com/gardener/gardener/pkg/apis/extensions/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var Scheme = runtime.NewScheme() +var Codecs = serializer.NewCodecFactory(Scheme) +var ParameterCodec = runtime.NewParameterCodec(Scheme) +var localSchemeBuilder = runtime.SchemeBuilder{ + extensionsv1alpha1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. 
This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(Scheme)) +} diff --git a/vendor/github.com/gardener/gardener/pkg/client/kubernetes/admissionplugins.go b/vendor/github.com/gardener/gardener/pkg/client/kubernetes/admissionplugins.go new file mode 100644 index 000000000..0095d6d80 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/client/kubernetes/admissionplugins.go @@ -0,0 +1,94 @@ +// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kubernetes + +import ( + "fmt" + + gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1" + + "github.com/Masterminds/semver" +) + +var ( + defaultPlugins = []gardencorev1beta1.AdmissionPlugin{ + {Name: "Priority"}, + {Name: "NamespaceLifecycle"}, + {Name: "LimitRanger"}, + {Name: "PodSecurityPolicy"}, + {Name: "ServiceAccount"}, + {Name: "NodeRestriction"}, + {Name: "DefaultStorageClass"}, + {Name: "DefaultTolerationSeconds"}, + {Name: "ResourceQuota"}, + {Name: "StorageObjectInUseProtection"}, + {Name: "MutatingAdmissionWebhook"}, + {Name: "ValidatingAdmissionWebhook"}, + } + defaultPluginsWithInitializers = append(defaultPlugins, gardencorev1beta1.AdmissionPlugin{Name: "Initializers"}) + + lowestSupportedKubernetesVersionMajorMinor = "1.10" + lowestSupportedKubernetesVersion, _ = semver.NewVersion(lowestSupportedKubernetesVersionMajorMinor) + + admissionPlugins = map[string][]gardencorev1beta1.AdmissionPlugin{ + "1.10": defaultPluginsWithInitializers, + "1.11": defaultPluginsWithInitializers, + "1.12": defaultPluginsWithInitializers, + "1.13": defaultPluginsWithInitializers, + "1.14": defaultPlugins, + } +) + +// GetAdmissionPluginsForVersion returns the set of default admission plugins for the given Kubernetes version. +// If the given Kubernetes version does not explicitly define admission plugins the set of names for the next +// available version will be returned (e.g., for version X not defined the set of version X-1 will be returned). 
+func GetAdmissionPluginsForVersion(v string) []gardencorev1beta1.AdmissionPlugin { + return copyPlugins(getAdmissionPluginsForVersionInternal(v)) +} + +func getAdmissionPluginsForVersionInternal(v string) []gardencorev1beta1.AdmissionPlugin { + version, err := semver.NewVersion(v) + if err != nil { + return admissionPlugins[lowestSupportedKubernetesVersionMajorMinor] + } + + if version.LessThan(lowestSupportedKubernetesVersion) { + return admissionPlugins[lowestSupportedKubernetesVersionMajorMinor] + } + + majorMinor := formatMajorMinor(version.Major(), version.Minor()) + if pluginsForVersion, ok := admissionPlugins[majorMinor]; ok { + return pluginsForVersion + } + + // We do not handle decrementing the major part of the version. The reason for this is that we would have to set + // the minor part to some higher value which we don't know (assume we go from 2.2->2.1->2.0->1.?). We decided not + // to handle decrementing the major part at all, as if Gardener supports Kubernetes 2.X (independent of the fact + // that it's anyway unclear when/whether that will come) many parts have to be adapted anyway. + return GetAdmissionPluginsForVersion(formatMajorMinor(version.Major(), version.Minor()-1)) +} + +func formatMajorMinor(major, minor int64) string { + return fmt.Sprintf("%d.%d", major, minor) +} + +func copyPlugins(admissionPlugins []gardencorev1beta1.AdmissionPlugin) []gardencorev1beta1.AdmissionPlugin { + dst := make([]gardencorev1beta1.AdmissionPlugin, 0) + for _, plugin := range admissionPlugins { + pluginPointer := &plugin + dst = append(dst, *pluginPointer.DeepCopy()) + } + return dst +} diff --git a/vendor/github.com/gardener/gardener/pkg/client/kubernetes/applier.go b/vendor/github.com/gardener/gardener/pkg/client/kubernetes/applier.go new file mode 100644 index 000000000..1718e8540 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/client/kubernetes/applier.go @@ -0,0 +1,432 @@ +// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kubernetes + +import ( + "bytes" + "context" + "fmt" + "io" + + utilerrors "github.com/gardener/gardener/pkg/utils/errors" + + "github.com/hashicorp/go-multierror" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/util/yaml" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// defaultApplier applies objects by retrieving their current state and then either creating / updating them +// (update happens with a predefined merge logic). +type defaultApplier struct { + client client.Client + restMapper meta.RESTMapper +} + +// NewApplier constructs a new Applier from the given client. 
+func NewApplier(c client.Client, restMapper meta.RESTMapper) Applier { + return &defaultApplier{client: c, restMapper: restMapper} +} + +// NewApplierForConfig creates a new Applier for the given rest.Config. +// Use NewApplier if you already have a client and RESTMapper at hand, as this will create a new direct client. +func NewApplierForConfig(config *rest.Config) (Applier, error) { + opts := client.Options{} + + if err := setClientOptionsDefaults(config, &opts); err != nil { + return nil, err + } + + c, err := NewDirectClient(config, opts) + if err != nil { + return nil, err + } + + return NewApplier(c, opts.Mapper), nil +} + +func (a *defaultApplier) applyObject(ctx context.Context, desired *unstructured.Unstructured, options MergeFuncs) error { + // look up scope of objects' kind to check, if we should default the namespace field + mapping, err := a.restMapper.RESTMapping(desired.GroupVersionKind().GroupKind(), desired.GroupVersionKind().Version) + if err != nil || mapping == nil { + // Don't reset RESTMapper in case of cache misses. Most probably indicates, that the corresponding CRD is not yet applied. + // CRD might be applied later as part of the same chart + + // default namespace on a best effort basis + if desired.GetKind() != "Namespace" && desired.GetNamespace() == "" { + desired.SetNamespace(metav1.NamespaceDefault) + } + } else { + if mapping.Scope.Name() == meta.RESTScopeNameNamespace { + // default namespace field to `default` in case of namespaced kinds + if desired.GetNamespace() == "" { + desired.SetNamespace(metav1.NamespaceDefault) + } + } else { + // unset namespace field in case of non-namespaced kinds + desired.SetNamespace("") + } + } + + key := client.ObjectKeyFromObject(desired) + if len(key.Name) == 0 { + return fmt.Errorf("missing 'metadata.name' in: %+v", desired) + } + + current := &unstructured.Unstructured{} + current.SetGroupVersionKind(desired.GroupVersionKind()) + if err = a.client.Get(ctx, key, current); err != nil { + if apierrors.IsNotFound(err) { + return a.client.Create(ctx, desired) + } + return err + } + + if err := a.mergeObjects(desired, current, options); err != nil { + return err + } + + return a.client.Update(ctx, desired) +} + +func (a *defaultApplier) deleteObject(ctx context.Context, desired *unstructured.Unstructured, opts *DeleteManifestOptions) error { + if desired.GetNamespace() == "" { + desired.SetNamespace(metav1.NamespaceDefault) + } + if len(desired.GetName()) == 0 { + return fmt.Errorf("missing 'metadata.name' in: %+v", desired) + } + + err := a.client.Delete(ctx, desired) + if err != nil { + // this is kept for backwards compatibility. + if apierrors.IsNotFound(err) { + return nil + } + + for _, tf := range opts.TolerateErrorFuncs { + if tf != nil && tf(err) { + return nil + } + } + } + + return err +} + +// DefaultMergeFuncs contains options for common k8s objects, e.g. Service, ServiceAccount. 
+var ( + DefaultMergeFuncs = MergeFuncs{ + corev1.SchemeGroupVersion.WithKind("Service").GroupKind(): func(newObj, oldObj *unstructured.Unstructured) { + newSvcType, found, _ := unstructured.NestedString(newObj.Object, "spec", "type") + if !found { + newSvcType = string(corev1.ServiceTypeClusterIP) + _ = unstructured.SetNestedField(newObj.Object, newSvcType, "spec", "type") + } + + oldSvcType, found, _ := unstructured.NestedString(oldObj.Object, "spec", "type") + if !found { + oldSvcType = string(corev1.ServiceTypeClusterIP) + } + + switch newSvcType { + case string(corev1.ServiceTypeLoadBalancer), string(corev1.ServiceTypeNodePort): + oldPorts, found, _ := unstructured.NestedSlice(oldObj.Object, "spec", "ports") + if !found { + // no old ports probably means that the service was of type External name before. + break + } + + newPorts, found, _ := unstructured.NestedSlice(newObj.Object, "spec", "ports") + if !found { + // no new ports is safe to ignore + break + } + + ports := make([]interface{}, 0, len(newPorts)) + for _, newPort := range newPorts { + np := newPort.(map[string]interface{}) + npName, _, _ := unstructured.NestedString(np, "name") + npPort, _ := nestedFloat64OrInt64(np, "port") + nodePort, ok := nestedFloat64OrInt64(np, "nodePort") + + for _, oldPortObj := range oldPorts { + op := oldPortObj.(map[string]interface{}) + opName, _, _ := unstructured.NestedString(op, "name") + opPort, _ := nestedFloat64OrInt64(op, "port") + + if (opName == npName || opPort == npPort) && (!ok || nodePort == 0) { + np["nodePort"] = op["nodePort"] + } + } + + ports = append(ports, np) + } + + _ = unstructured.SetNestedSlice(newObj.Object, ports, "spec", "ports") + + case string(corev1.ServiceTypeExternalName): + // there is no ClusterIP in this case + return + } + + // ClusterIP is immutable unless that old service is of type ExternalName + if oldSvcType != string(corev1.ServiceTypeExternalName) { + newClusterIP, _, _ := unstructured.NestedString(newObj.Object, "spec", "clusterIP") + if newClusterIP != corev1.ClusterIPNone || newSvcType != string(corev1.ServiceTypeClusterIP) { + oldClusterIP, _, _ := unstructured.NestedString(oldObj.Object, "spec", "clusterIP") + _ = unstructured.SetNestedField(newObj.Object, oldClusterIP, "spec", "clusterIP") + } + } + + newETP, _, _ := unstructured.NestedString(newObj.Object, "spec", "externalTrafficPolicy") + oldETP, _, _ := unstructured.NestedString(oldObj.Object, "spec", "externalTrafficPolicy") + + if oldSvcType == string(corev1.ServiceTypeLoadBalancer) && + newSvcType == string(corev1.ServiceTypeLoadBalancer) && + newETP == string(corev1.ServiceExternalTrafficPolicyTypeLocal) && + oldETP == string(corev1.ServiceExternalTrafficPolicyTypeLocal) { + newHealthCheckPort, _ := nestedFloat64OrInt64(newObj.Object, "spec", "healthCheckNodePort") + if newHealthCheckPort == 0 { + oldHealthCheckPort, _ := nestedFloat64OrInt64(oldObj.Object, "spec", "healthCheckNodePort") + _ = unstructured.SetNestedField(newObj.Object, oldHealthCheckPort, "spec", "healthCheckNodePort") + } + } + + }, + corev1.SchemeGroupVersion.WithKind("ServiceAccount").GroupKind(): func(newObj, oldObj *unstructured.Unstructured) { + // We do not want to overwrite a ServiceAccount's `.secrets[]` list or `.imagePullSecrets[]`. 
+ newObj.Object["secrets"] = oldObj.Object["secrets"] + newObj.Object["imagePullSecrets"] = oldObj.Object["imagePullSecrets"] + }, + {Group: "autoscaling.k8s.io", Kind: "VerticalPodAutoscaler"}: func(newObj, oldObj *unstructured.Unstructured) { + // Never override the status of VPA resources + newObj.Object["status"] = oldObj.Object["status"] + }, + } + + DeploymentKeepReplicasMergeFunc = MergeFunc(func(newObj, oldObj *unstructured.Unstructured) { + oldReplicas, ok := nestedFloat64OrInt64(oldObj.Object, "spec", "replicas") + if !ok { + return + } + _ = unstructured.SetNestedField(newObj.Object, oldReplicas, "spec", "replicas") + }) +) + +func nestedFloat64OrInt64(obj map[string]interface{}, fields ...string) (int64, bool) { + val, found, err := unstructured.NestedFieldNoCopy(obj, fields...) + if !found || err != nil { + return 0, found + } + + f, ok := val.(float64) + if ok { + return int64(f), true + } + + i, ok := val.(int64) + if ok { + return i, true + } + + return 0, false +} + +// CopyApplierOptions returns a copies of the provided applier options. +func CopyApplierOptions(in MergeFuncs) MergeFuncs { + out := make(MergeFuncs, len(in)) + + for k, v := range in { + out[k] = v + } + + return out +} + +func (a *defaultApplier) mergeObjects(newObj, oldObj *unstructured.Unstructured, mergeFuncs MergeFuncs) error { + newObj.SetResourceVersion(oldObj.GetResourceVersion()) + + // We do not want to overwrite the Finalizers. + newObj.Object["metadata"].(map[string]interface{})["finalizers"] = oldObj.Object["metadata"].(map[string]interface{})["finalizers"] + + if merge, ok := mergeFuncs[newObj.GroupVersionKind().GroupKind()]; ok { + merge(newObj, oldObj) + } + + return nil +} + +// ApplyManifest is a function which does the same like `kubectl apply -f `. It takes a bunch of manifests , +// all concatenated in a byte slice, and sends them one after the other to the API server. If a resource +// already exists at the API server, it will update it. It returns an error as soon as the first error occurs. +func (a *defaultApplier) ApplyManifest(ctx context.Context, r UnstructuredReader, options MergeFuncs) error { + allErrs := &multierror.Error{ + ErrorFormat: utilerrors.NewErrorFormatFuncWithPrefix("failed to apply manifests"), + } + + for { + obj, err := r.Read() + if err == io.EOF { + break + } + if err != nil { + allErrs = multierror.Append(allErrs, fmt.Errorf("could not read object: %+v", err)) + continue + } + if obj == nil { + continue + } + + if err := a.applyObject(ctx, obj, options); err != nil { + allErrs = multierror.Append(allErrs, fmt.Errorf("could not apply object of kind %q \"%s/%s\": %+v", obj.GetKind(), obj.GetNamespace(), obj.GetName(), err)) + continue + } + } + + return allErrs.ErrorOrNil() +} + +// DeleteManifest is a function which does the same like `kubectl delete -f `. It takes a bunch of manifests , +// all concatenated in a byte slice, and sends them one after the other to the API server for deletion. +// It returns an error as soon as the first error occurs. 
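// Illustrative usage sketch (editorial addition, not taken from the vendored file):
// how a caller holding a *rest.Config might apply a multi-document YAML manifest via
// the Applier defined above. The package name, function name and manifest contents
// are hypothetical.

package example

import (
	"context"

	"github.com/gardener/gardener/pkg/client/kubernetes"
	"k8s.io/client-go/rest"
)

func applyRawManifest(ctx context.Context, config *rest.Config, manifest []byte) error {
	// NewApplierForConfig builds a direct (uncached) client plus REST mapper internally.
	applier, err := kubernetes.NewApplierForConfig(config)
	if err != nil {
		return err
	}

	// Every YAML/JSON document in the manifest is decoded, namespace-defaulted and
	// then created or updated on the API server using the default merge functions.
	return applier.ApplyManifest(ctx, kubernetes.NewManifestReader(manifest), kubernetes.DefaultMergeFuncs)
}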
+func (a *defaultApplier) DeleteManifest(ctx context.Context, r UnstructuredReader, opts ...DeleteManifestOption) error { + allErrs := &multierror.Error{ + ErrorFormat: utilerrors.NewErrorFormatFuncWithPrefix("failed to delete manifests"), + } + + deleteOps := &DeleteManifestOptions{} + + for _, o := range opts { + if o != nil { + o.MutateDeleteManifestOptions(deleteOps) + } + } + + for { + obj, err := r.Read() + if err == io.EOF { + break + } + if err != nil { + allErrs = multierror.Append(allErrs, fmt.Errorf("could not read object: %+v", err)) + continue + } + if obj == nil { + continue + } + + if err := a.deleteObject(ctx, obj, deleteOps); err != nil { + allErrs = multierror.Append(allErrs, fmt.Errorf("could not delete object of kind %q \"%s/%s\": %+v", obj.GetKind(), obj.GetNamespace(), obj.GetName(), err)) + continue + } + } + + return allErrs.ErrorOrNil() +} + +// UnstructuredReader an interface that all manifest readers should implement +type UnstructuredReader interface { + Read() (*unstructured.Unstructured, error) +} + +// NewManifestReader initializes a reader for yaml manifests +func NewManifestReader(manifest []byte) UnstructuredReader { + return &manifestReader{ + decoder: yaml.NewYAMLOrJSONDecoder(bytes.NewReader(manifest), 1024), + manifest: manifest, + } +} + +// manifestReader is an unstructured reader that contains a JSONDecoder +type manifestReader struct { + decoder *yaml.YAMLOrJSONDecoder + manifest []byte +} + +// Read decodes yaml data into an unstructured object +func (m *manifestReader) Read() (*unstructured.Unstructured, error) { + // loop for skipping empty yaml objects + for { + var data map[string]interface{} + + err := m.decoder.Decode(&data) + if err == io.EOF { + return nil, err + } + if err != nil { + return nil, fmt.Errorf("error '%+v' decoding manifest: %s", err, string(m.manifest)) + } + if data == nil { + continue + } + + return &unstructured.Unstructured{Object: data}, nil + } +} + +// NewNamespaceSettingReader initializes a reader for yaml manifests with support for setting the namespace +func NewNamespaceSettingReader(mReader UnstructuredReader, namespace string) UnstructuredReader { + return &namespaceSettingReader{ + reader: mReader, + namespace: namespace, + } +} + +// namespaceSettingReader is an unstructured reader that contains a JSONDecoder and a manifest reader (or other reader types) +type namespaceSettingReader struct { + reader UnstructuredReader + namespace string +} + +// Read decodes yaml data into an unstructured object +func (n *namespaceSettingReader) Read() (*unstructured.Unstructured, error) { + readObj, err := n.reader.Read() + if err != nil { + return nil, err + } + + readObj.SetNamespace(n.namespace) + + return readObj, nil +} + +// NewObjectReferenceReader initializes a reader from ObjectReference +func NewObjectReferenceReader(objectReference *corev1.ObjectReference) UnstructuredReader { + return &objectReferenceReader{ + objectReference: objectReference, + } +} + +// objectReferenceReader is an unstructured reader that contains a ObjectReference +type objectReferenceReader struct { + objectReference *corev1.ObjectReference +} + +// Read translates ObjectReference into Unstructured object +func (r *objectReferenceReader) Read() (*unstructured.Unstructured, error) { + obj := &unstructured.Unstructured{} + obj.SetAPIVersion(r.objectReference.APIVersion) + obj.SetKind(r.objectReference.Kind) + obj.SetNamespace(r.objectReference.Namespace) + obj.SetName(r.objectReference.Name) + + return obj, nil +} diff --git 
a/vendor/github.com/gardener/gardener/pkg/client/kubernetes/chartapplier.go b/vendor/github.com/gardener/gardener/pkg/client/kubernetes/chartapplier.go new file mode 100644 index 000000000..36a33edeb --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/client/kubernetes/chartapplier.go @@ -0,0 +1,124 @@ +// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kubernetes + +import ( + "context" + + "github.com/gardener/gardener/pkg/chartrenderer" + + "k8s.io/client-go/rest" +) + +// ChartApplier is an interface that describes needed methods that render and apply +// Helm charts in Kubernetes clusters. +type ChartApplier interface { + chartrenderer.Interface + Apply(ctx context.Context, chartPath, namespace, name string, opts ...ApplyOption) error + Delete(ctx context.Context, chartPath, namespace, name string, opts ...DeleteOption) error +} + +// chartApplier is a structure that contains a chart renderer and a manifest applier. +type chartApplier struct { + chartrenderer.Interface + Applier +} + +// NewChartApplier returns a new chart applier. +func NewChartApplier(renderer chartrenderer.Interface, applier Applier) ChartApplier { + return &chartApplier{renderer, applier} +} + +// NewChartApplierForConfig returns a new chart applier based on the given REST config. +func NewChartApplierForConfig(config *rest.Config) (ChartApplier, error) { + renderer, err := chartrenderer.NewForConfig(config) + if err != nil { + return nil, err + } + applier, err := NewApplierForConfig(config) + if err != nil { + return nil, err + } + return NewChartApplier(renderer, applier), nil +} + +// Apply takes a path to a chart , name of the release , +// release's namespace and renders the template based value. +// The resulting manifest will be applied to the cluster the Kubernetes client has been created for. +// can be used to enchance the existing functionality. +func (c *chartApplier) Apply(ctx context.Context, chartPath, namespace, name string, opts ...ApplyOption) error { + applyOpts := &ApplyOptions{} + + for _, o := range opts { + if o != nil { + o.MutateApplyOptions(applyOpts) + } + } + + if len(applyOpts.MergeFuncs) == 0 { + applyOpts.MergeFuncs = DefaultMergeFuncs + } + + manifestReader, err := c.manifestReader(chartPath, namespace, name, applyOpts.Values) + if err != nil { + return err + } + + if applyOpts.ForceNamespace { + manifestReader = NewNamespaceSettingReader(manifestReader, namespace) + } + + return c.ApplyManifest(ctx, manifestReader, applyOpts.MergeFuncs) +} + +// Delete takes a path to a chart , name of the release , +// release's namespace and renders the template. +// The resulting manifest will be deleted from the cluster the Kubernetes client has been created for. 
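// Illustrative usage sketch (editorial addition, not taken from the vendored file):
// rendering and applying a local Helm chart through the ChartApplier. The chart path,
// namespace, release name and values are hypothetical.

package example

import (
	"context"

	"github.com/gardener/gardener/pkg/client/kubernetes"
	"k8s.io/client-go/rest"
)

func deployChart(ctx context.Context, config *rest.Config) error {
	chartApplier, err := kubernetes.NewChartApplierForConfig(config)
	if err != nil {
		return err
	}

	values := map[string]interface{}{"replicaCount": 2}

	// Values passes the rendering values to the chart renderer; ForceNamespace stamps
	// the target namespace onto objects whose templates leave metadata.namespace empty.
	return chartApplier.Apply(ctx, "charts/my-component", "my-namespace", "my-release",
		kubernetes.Values(values), kubernetes.ForceNamespace)
}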
+func (c *chartApplier) Delete(ctx context.Context, chartPath, namespace, name string, opts ...DeleteOption) error { + deleteOpts := &DeleteOptions{} + + for _, o := range opts { + if o != nil { + o.MutateDeleteOptions(deleteOpts) + } + } + + manifestReader, err := c.manifestReader(chartPath, namespace, name, deleteOpts.Values) + if err != nil { + return err + } + + if deleteOpts.ForceNamespace { + manifestReader = NewNamespaceSettingReader(manifestReader, namespace) + } + + deleteManifestOpts := []DeleteManifestOption{} + + for _, tf := range deleteOpts.TolerateErrorFuncs { + if tf != nil { + deleteManifestOpts = append(deleteManifestOpts, tf) + } + } + + return c.DeleteManifest(ctx, manifestReader, deleteManifestOpts...) +} + +func (c *chartApplier) manifestReader(chartPath, namespace, name string, values interface{}) (UnstructuredReader, error) { + release, err := c.Render(chartPath, name, namespace, values) + if err != nil { + return nil, err + } + return NewManifestReader(release.Manifest()), nil +} diff --git a/vendor/github.com/gardener/gardener/pkg/client/kubernetes/chartoptions.go b/vendor/github.com/gardener/gardener/pkg/client/kubernetes/chartoptions.go new file mode 100644 index 000000000..438a1ca5d --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/client/kubernetes/chartoptions.go @@ -0,0 +1,125 @@ +// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kubernetes + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// ApplyOption is some configuration that modifies options for a apply request. +type ApplyOption interface { + // MutateApplyOptions applies this configuration to the given apply options. + MutateApplyOptions(opts *ApplyOptions) +} + +// ApplyOptions contains options for apply requests +type ApplyOptions struct { + // Values to pass to chart. + Values interface{} + + // Additional MergeFunctions. + MergeFuncs MergeFuncs + + // Forces the namespace for chart objects when applying the chart, this is because sometimes native chart + // objects do not come with a Release.Namespace option and leave the namespace field empty + ForceNamespace bool +} + +// Values applies values to ApplyOptions or DeleteOptions. 
+var Values = func(values interface{}) ValueOption { return &withValue{values} } + +type withValue struct { + values interface{} +} + +func (v withValue) MutateApplyOptions(opts *ApplyOptions) { + opts.Values = v.values +} + +func (v withValue) MutateDeleteOptions(opts *DeleteOptions) { + opts.Values = v.values +} + +// MergeFuncs can be used modify the default merge functions for ApplyOptions: +// +// Apply(ctx, "chart", "my-ns", "my-release", MergeFuncs{ +// corev1.SchemeGroupVersion.WithKind("Service").GroupKind(): func(newObj, oldObj *unstructured.Unstructured) { +// newObj.SetAnnotations(map[string]string{"foo":"bar"}) +// } +// }) +type MergeFuncs map[schema.GroupKind]MergeFunc + +// MutateApplyOptions applies this configuration to the given apply options. +func (m MergeFuncs) MutateApplyOptions(opts *ApplyOptions) { + opts.MergeFuncs = m +} + +// ForceNamespace can be used for native chart objects do not come with +// a Release.Namespace option and leave the namespace field empty. +var ForceNamespace = forceNamespace{} + +type forceNamespace struct{} + +func (forceNamespace) MutateApplyOptions(opts *ApplyOptions) { + opts.ForceNamespace = true +} + +func (forceNamespace) MutateDeleteOptions(opts *DeleteOptions) { + opts.ForceNamespace = true +} + +// ValueOption contains value options for Apply and Delete. +type ValueOption interface { + ApplyOption + DeleteOption +} + +// DeleteOption is some configuration that modifies options for a delete request. +type DeleteOption interface { + // MutateDeleteOptions applies this configuration to the given delete options. + MutateDeleteOptions(opts *DeleteOptions) +} + +// DeleteOptions contains options for delete requests +type DeleteOptions struct { + // Values to pass to chart. + Values interface{} + + // Forces the namespace for chart objects when applying the chart, this is because sometimes native chart + // objects do not come with a Release.Namespace option and leave the namespace field empty + ForceNamespace bool + + // TolerateErrorFuncs are functions for which errors are tolerated. + TolerateErrorFuncs []TolerateErrorFunc +} + +// TolerateErrorFunc is a function for which err is tolerated. +type TolerateErrorFunc func(err error) bool + +func (t TolerateErrorFunc) MutateDeleteOptions(opts *DeleteOptions) { + if opts.TolerateErrorFuncs == nil { + opts.TolerateErrorFuncs = []TolerateErrorFunc{} + } + + opts.TolerateErrorFuncs = append(opts.TolerateErrorFuncs, t) +} + +func (t TolerateErrorFunc) MutateDeleteManifestOptions(opts *DeleteManifestOptions) { + if opts.TolerateErrorFuncs == nil { + opts.TolerateErrorFuncs = []TolerateErrorFunc{} + } + + opts.TolerateErrorFuncs = append(opts.TolerateErrorFuncs, t) +} diff --git a/vendor/github.com/gardener/gardener/pkg/client/kubernetes/client.go b/vendor/github.com/gardener/gardener/pkg/client/kubernetes/client.go new file mode 100644 index 000000000..f2a141764 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/client/kubernetes/client.go @@ -0,0 +1,357 @@ +// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kubernetes + +import ( + "context" + "errors" + "fmt" + + corev1 "k8s.io/api/core/v1" + apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + kubernetesclientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" + componentbaseconfig "k8s.io/component-base/config" + apiserviceclientset "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + + gardencoreclientset "github.com/gardener/gardener/pkg/client/core/clientset/versioned" + gardenercorescheme "github.com/gardener/gardener/pkg/client/core/clientset/versioned/scheme" + gardenseedmanagementclientset "github.com/gardener/gardener/pkg/client/seedmanagement/clientset/versioned" + seedmanagementscheme "github.com/gardener/gardener/pkg/client/seedmanagement/clientset/versioned/scheme" + settingsscheme "github.com/gardener/gardener/pkg/client/settings/clientset/versioned/scheme" + versionutils "github.com/gardener/gardener/pkg/utils/version" +) + +var ( + // UseCachedRuntimeClients is a flag for enabling cached controller-runtime clients (defaults to false). + // If enabled, the client returned by Interface.Client() will be backed by a cache, otherwise it will be the same + // client that will be returned by Interface.DirectClient(). + UseCachedRuntimeClients = false +) + +// KubeConfig is the key to the kubeconfig +const KubeConfig = "kubeconfig" + +func init() { + // enable protobuf for Gardener API for controller-runtime clients + protobufSchemeBuilder := runtime.NewSchemeBuilder( + gardenercorescheme.AddToScheme, + seedmanagementscheme.AddToScheme, + settingsscheme.AddToScheme, + ) + + utilruntime.Must(apiutil.AddToProtobufScheme(protobufSchemeBuilder.AddToScheme)) +} + +// NewClientFromFile creates a new Client struct for a given kubeconfig. The kubeconfig will be +// read from the filesystem at location . If given, overrides the +// master URL in the kubeconfig. +// If no filepath is given, the in-cluster configuration will be taken into account. +func NewClientFromFile(masterURL, kubeconfigPath string, fns ...ConfigFunc) (Interface, error) { + if kubeconfigPath == "" && masterURL == "" { + kubeconfig, err := rest.InClusterConfig() + if err != nil { + return nil, err + } + opts := append([]ConfigFunc{WithRESTConfig(kubeconfig)}, fns...) + return NewWithConfig(opts...) + } + + clientConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig( + &clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeconfigPath}, + &clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: masterURL}}, + ) + + if err := validateClientConfig(clientConfig); err != nil { + return nil, err + } + + config, err := clientConfig.ClientConfig() + if err != nil { + return nil, err + } + + opts := append([]ConfigFunc{WithRESTConfig(config)}, fns...) 
+ return NewWithConfig(opts...) +} + +// NewClientFromBytes creates a new Client struct for a given kubeconfig byte slice. +func NewClientFromBytes(kubeconfig []byte, fns ...ConfigFunc) (Interface, error) { + config, err := RESTConfigFromClientConnectionConfiguration(nil, kubeconfig) + if err != nil { + return nil, err + } + + opts := append([]ConfigFunc{WithRESTConfig(config)}, fns...) + return NewWithConfig(opts...) +} + +// NewClientFromSecret creates a new Client struct for a given kubeconfig stored as a +// Secret in an existing Kubernetes cluster. This cluster will be accessed by the . It will +// read the Secret in . The Secret must contain a field "kubeconfig" which will +// be used. +func NewClientFromSecret(ctx context.Context, c client.Client, namespace, secretName string, fns ...ConfigFunc) (Interface, error) { + secret := &corev1.Secret{} + if err := c.Get(ctx, client.ObjectKey{Namespace: namespace, Name: secretName}, secret); err != nil { + return nil, err + } + return NewClientFromSecretObject(secret, fns...) +} + +// NewClientFromSecretObject creates a new Client struct for a given Kubernetes Secret object. The Secret must +// contain a field "kubeconfig" which will be used. +func NewClientFromSecretObject(secret *corev1.Secret, fns ...ConfigFunc) (Interface, error) { + if kubeconfig, ok := secret.Data[KubeConfig]; ok { + if len(kubeconfig) == 0 { + return nil, errors.New("the secret's field 'kubeconfig' is empty") + } + + return NewClientFromBytes(kubeconfig, fns...) + } + return nil, errors.New("the secret does not contain a field with name 'kubeconfig'") +} + +// RESTConfigFromClientConnectionConfiguration creates a *rest.Config from a componentbaseconfig.ClientConnectionConfiguration & the configured kubeconfig +func RESTConfigFromClientConnectionConfiguration(cfg *componentbaseconfig.ClientConnectionConfiguration, kubeconfig []byte) (*rest.Config, error) { + var ( + restConfig *rest.Config + err error + ) + + if kubeconfig == nil { + clientConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig( + &clientcmd.ClientConfigLoadingRules{ExplicitPath: cfg.Kubeconfig}, + &clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: ""}}, + ) + + if err := validateClientConfig(clientConfig); err != nil { + return nil, err + } + + restConfig, err = clientConfig.ClientConfig() + if err != nil { + return nil, err + } + } else { + restConfig, err = RESTConfigFromKubeconfig(kubeconfig) + if err != nil { + return restConfig, err + } + } + + if cfg != nil { + restConfig.Burst = int(cfg.Burst) + restConfig.QPS = cfg.QPS + restConfig.AcceptContentTypes = cfg.AcceptContentTypes + restConfig.ContentType = cfg.ContentType + } + + return restConfig, nil +} + +// RESTConfigFromKubeconfig returns a rest.Config from the bytes of a kubeconfig +func RESTConfigFromKubeconfig(kubeconfig []byte) (*rest.Config, error) { + clientConfig, err := clientcmd.NewClientConfigFromBytes(kubeconfig) + if err != nil { + return nil, err + } + + if err := validateClientConfig(clientConfig); err != nil { + return nil, err + } + + restConfig, err := clientConfig.ClientConfig() + if err != nil { + return nil, err + } + return restConfig, nil +} + +func validateClientConfig(clientConfig clientcmd.ClientConfig) error { + rawConfig, err := clientConfig.RawConfig() + if err != nil { + return err + } + return ValidateConfig(rawConfig) +} + +// ValidateConfig validates that the auth info of a given kubeconfig doesn't have unsupported fields. 
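// Illustrative usage sketch (editorial addition, not taken from the vendored file):
// building a ClientSet for a cluster whose kubeconfig lives in a Secret with the
// "kubeconfig" data key, as described above. The namespace, secret name and the
// choice of scheme are hypothetical.

package example

import (
	"context"

	"github.com/gardener/gardener/pkg/client/kubernetes"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func seedClientFromSecret(ctx context.Context, gardenClient client.Client) (kubernetes.Interface, error) {
	return kubernetes.NewClientFromSecret(ctx, gardenClient, "garden", "seed-kubeconfig",
		kubernetes.WithClientOptions(client.Options{Scheme: kubernetes.SeedScheme}),
	)
}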
+func ValidateConfig(config clientcmdapi.Config) error { + validFields := []string{"client-certificate-data", "client-key-data", "token", "username", "password"} + + for user, authInfo := range config.AuthInfos { + switch { + case authInfo.ClientCertificate != "": + return fmt.Errorf("client certificate files are not supported (user %q), these are the valid fields: %+v", user, validFields) + case authInfo.ClientKey != "": + return fmt.Errorf("client key files are not supported (user %q), these are the valid fields: %+v", user, validFields) + case authInfo.TokenFile != "": + return fmt.Errorf("token files are not supported (user %q), these are the valid fields: %+v", user, validFields) + case authInfo.Impersonate != "" || len(authInfo.ImpersonateGroups) > 0: + return fmt.Errorf("impersonation is not supported, these are the valid fields: %+v", validFields) + case authInfo.AuthProvider != nil && len(authInfo.AuthProvider.Config) > 0: + return fmt.Errorf("auth provider configurations are not supported (user %q), these are the valid fields: %+v", user, validFields) + case authInfo.Exec != nil: + return fmt.Errorf("exec configurations are not supported (user %q), these are the valid fields: %+v", user, validFields) + } + } + + return nil +} + +var supportedKubernetesVersions = []string{ + "1.10", + "1.11", + "1.12", + "1.13", + "1.14", + "1.15", + "1.16", + "1.17", + "1.18", + "1.19", + "1.20", +} + +func checkIfSupportedKubernetesVersion(gitVersion string) error { + for _, supportedVersion := range supportedKubernetesVersions { + ok, err := versionutils.CompareVersions(gitVersion, "~", supportedVersion) + if err != nil { + return err + } + + if ok { + return nil + } + } + return fmt.Errorf("unsupported kubernetes version %q", gitVersion) +} + +// NewWithConfig returns a new Kubernetes base client. +func NewWithConfig(fns ...ConfigFunc) (Interface, error) { + conf := &Config{} + + for _, f := range fns { + if err := f(conf); err != nil { + return nil, err + } + } + + return newClientSet(conf) +} + +func newClientSet(conf *Config) (Interface, error) { + if err := setConfigDefaults(conf); err != nil { + return nil, err + } + + runtimeCache, err := NewRuntimeCache(conf.restConfig, cache.Options{ + Scheme: conf.clientOptions.Scheme, + Mapper: conf.clientOptions.Mapper, + Resync: conf.cacheResync, + }) + if err != nil { + return nil, err + } + + directClient, err := client.New(conf.restConfig, conf.clientOptions) + if err != nil { + return nil, err + } + + var runtimeClient client.Client + if UseCachedRuntimeClients && !conf.disableCache { + runtimeClient, err = newRuntimeClientWithCache(conf.restConfig, conf.clientOptions, runtimeCache, conf.uncachedObjects...) + if err != nil { + return nil, err + } + } else { + runtimeClient = directClient + } + + // prepare rest config with contentType defaulted to protobuf for client-go style clients that either talk to + // kubernetes or aggregated APIs that support protobuf. 
+ cfg := defaultContentTypeProtobuf(conf.restConfig) + + kubernetes, err := kubernetesclientset.NewForConfig(cfg) + if err != nil { + return nil, err + } + + gardenCore, err := gardencoreclientset.NewForConfig(cfg) + if err != nil { + return nil, err + } + + gardenSeedManagement, err := gardenseedmanagementclientset.NewForConfig(cfg) + if err != nil { + return nil, err + } + + apiRegistration, err := apiserviceclientset.NewForConfig(cfg) + if err != nil { + return nil, err + } + + apiExtension, err := apiextensionsclientset.NewForConfig(cfg) + if err != nil { + return nil, err + } + + cs := &clientSet{ + config: conf.restConfig, + restClient: kubernetes.Discovery().RESTClient(), + + applier: NewApplier(runtimeClient, conf.clientOptions.Mapper), + + client: runtimeClient, + directClient: directClient, + cache: runtimeCache, + + kubernetes: kubernetes, + gardenCore: gardenCore, + gardenSeedManagement: gardenSeedManagement, + apiregistration: apiRegistration, + apiextension: apiExtension, + } + + if _, err := cs.DiscoverVersion(); err != nil { + return nil, fmt.Errorf("error discovering kubernetes version: %w", err) + } + + return cs, nil +} + +func setConfigDefaults(conf *Config) error { + // we can't default to protobuf ContentType here, otherwise controller-runtime clients will also try to talk to + // CRD resources (e.g. extension CRs in the Seed) using protobuf, but CRDs don't support protobuf + // see https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/#advanced-features-and-flexibility + return setClientOptionsDefaults(conf.restConfig, &conf.clientOptions) +} + +func defaultContentTypeProtobuf(c *rest.Config) *rest.Config { + config := *c + if config.ContentType == "" { + config.ContentType = runtime.ContentTypeProtobuf + } + return &config +} diff --git a/vendor/github.com/gardener/gardener/pkg/client/kubernetes/clientset.go b/vendor/github.com/gardener/gardener/pkg/client/kubernetes/clientset.go new file mode 100644 index 000000000..0b2cd3200 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/client/kubernetes/clientset.go @@ -0,0 +1,176 @@ +// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
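// Illustrative sketch (editorial addition, not taken from the vendored file): the
// tilde constraint used by checkIfSupportedKubernetesVersion matches any patch level
// of a listed major.minor, so a server GitVersion such as "v1.19.6" is accepted via
// the "1.19" entry. This assumes CompareVersions tolerates the leading "v", as in the
// vendored call site.

package example

import (
	"fmt"

	versionutils "github.com/gardener/gardener/pkg/utils/version"
)

func printSupported() {
	ok, err := versionutils.CompareVersions("v1.19.6", "~", "1.19")
	fmt.Println(ok, err) // expected: true <nil>
}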
+ +package kubernetes + +import ( + "context" + "sync" + + "github.com/gardener/gardener/pkg/chartrenderer" + gardencoreclientset "github.com/gardener/gardener/pkg/client/core/clientset/versioned" + gardenseedmanagementclientset "github.com/gardener/gardener/pkg/client/seedmanagement/clientset/versioned" + "github.com/gardener/gardener/pkg/logger" + + apiextensionclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" + "k8s.io/apimachinery/pkg/version" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + apiregistrationclientset "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// clientSet is a struct containing the configuration for the respective Kubernetes +// cluster, the collection of Kubernetes clients containing all REST clients +// for the built-in Kubernetes API groups, and the Garden which is a REST clientSet +// for the Garden API group. +// The RESTClient itself is a normal HTTP client for the respective Kubernetes cluster, +// allowing requests to arbitrary URLs. +// The version string contains only the major/minor part in the form .. +type clientSet struct { + config *rest.Config + restClient rest.Interface + + applier Applier + chartApplier ChartApplier + chartRenderer chartrenderer.Interface + + // client is the default controller-runtime client which uses SharedIndexInformers to keep its cache in sync + client client.Client + // directClient is a client which can be used to make requests directly to the API server instead of reading from + // the client's cache + directClient client.Client + // cache is the client's cache + cache cache.Cache + + // startOnce guards starting the cache only once + startOnce sync.Once + + kubernetes kubernetes.Interface + gardenCore gardencoreclientset.Interface + gardenSeedManagement gardenseedmanagementclientset.Interface + apiextension apiextensionclientset.Interface + apiregistration apiregistrationclientset.Interface + + version string +} + +// Applier returns the Applier of this ClientSet. +func (c *clientSet) Applier() Applier { + return c.applier +} + +// ChartRenderer returns a ChartRenderer populated with the cluster's Capabilities. +func (c *clientSet) ChartRenderer() chartrenderer.Interface { + return c.chartRenderer +} + +// ChartApplier returns a ChartApplier using the ClientSet's ChartRenderer and Applier. +func (c *clientSet) ChartApplier() ChartApplier { + return c.chartApplier +} + +// RESTConfig will return the config attribute of the Client object. +func (c *clientSet) RESTConfig() *rest.Config { + return c.config +} + +// Client returns the controller-runtime client of this ClientSet. +func (c *clientSet) Client() client.Client { + return c.client +} + +// DirectClient returns a controller-runtime client, which can be used to talk to the API server directly +// (without using a cache). +func (c *clientSet) DirectClient() client.Client { + return c.directClient +} + +// Cache returns the ClientSet's controller-runtime cache. It can be used to get Informers for arbitrary objects. +func (c *clientSet) Cache() cache.Cache { + return c.cache +} + +// Kubernetes will return the kubernetes attribute of the Client object. +func (c *clientSet) Kubernetes() kubernetes.Interface { + return c.kubernetes +} + +// GardenCore will return the gardenCore attribute of the Client object. 
+func (c *clientSet) GardenCore() gardencoreclientset.Interface { + return c.gardenCore +} + +// GardenSeedManagement will return the gardenSeedManagement attribute of the Client object. +func (c *clientSet) GardenSeedManagement() gardenseedmanagementclientset.Interface { + return c.gardenSeedManagement +} + +// APIExtension will return the apiextensions attribute of the Client object. +func (c *clientSet) APIExtension() apiextensionclientset.Interface { + return c.apiextension +} + +// APIRegistration will return the apiregistration attribute of the Client object. +func (c *clientSet) APIRegistration() apiregistrationclientset.Interface { + return c.apiregistration +} + +// RESTClient will return the restClient attribute of the Client object. +func (c *clientSet) RESTClient() rest.Interface { + return c.restClient +} + +// Version returns the GitVersion of the Kubernetes client stored on the object. +func (c *clientSet) Version() string { + return c.version +} + +// DiscoverVersion tries to retrieve the server version of the targeted Kubernetes cluster and updates the +// ClientSet's saved version accordingly. Use Version if you only want to retrieve the kubernetes version instead +// of refreshing the ClientSet's saved version. +func (c *clientSet) DiscoverVersion() (*version.Info, error) { + serverVersion, err := c.kubernetes.Discovery().ServerVersion() + if err != nil { + return nil, err + } + + if err := checkIfSupportedKubernetesVersion(serverVersion.GitVersion); err != nil { + return nil, err + } + + c.version = serverVersion.GitVersion + c.chartRenderer = chartrenderer.NewWithServerVersion(serverVersion) + c.chartApplier = NewChartApplier(c.chartRenderer, c.applier) + + return serverVersion, nil +} + +// Start starts the cache of the ClientSet's controller-runtime client and returns immediately. +// It must be called first before using the client to retrieve objects from the API server. +func (c *clientSet) Start(ctx context.Context) { + c.startOnce.Do(func() { + go func() { + if err := c.cache.Start(ctx); err != nil { + logger.Logger.Errorf("cache.Start returned error, which should never happen, ignoring.") + } + }() + }) +} + +// WaitForCacheSync waits for the cache of the ClientSet's controller-runtime client to be synced. +func (c *clientSet) WaitForCacheSync(ctx context.Context) bool { + return c.cache.WaitForCacheSync(ctx) +} diff --git a/vendor/github.com/gardener/gardener/pkg/client/kubernetes/deployments.go b/vendor/github.com/gardener/gardener/pkg/client/kubernetes/deployments.go new file mode 100644 index 000000000..fef83f61f --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/client/kubernetes/deployments.go @@ -0,0 +1,64 @@ +// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
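// Illustrative usage sketch (editorial addition, not taken from the vendored file):
// the typical lifecycle of a ClientSet when the cached runtime client is enabled.
// The kubeconfig path is hypothetical.

package example

import (
	"context"
	"fmt"

	"github.com/gardener/gardener/pkg/client/kubernetes"
	corev1 "k8s.io/api/core/v1"
)

func listNamespaces(ctx context.Context) error {
	cs, err := kubernetes.NewClientFromFile("", "/path/to/kubeconfig")
	if err != nil {
		return err
	}

	// Start the cache backing cs.Client() and wait for the informers to sync;
	// cached reads are only reliable afterwards.
	cs.Start(ctx)
	if !cs.WaitForCacheSync(ctx) {
		return fmt.Errorf("cache could not be synced")
	}

	namespaceList := &corev1.NamespaceList{}
	return cs.Client().List(ctx, namespaceList)
}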
+
+package kubernetes
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/gardener/gardener/pkg/utils/retry"
+
+	appsv1 "k8s.io/api/apps/v1"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// HasDeploymentRolloutCompleted checks for the number of updated &
+// available replicas to be equal to the deployment's desired replicas count,
+// thus confirming a successful rollout of the deployment.
+func HasDeploymentRolloutCompleted(ctx context.Context, c client.Client, namespace, name string) (bool, error) {
+	var (
+		deployment      = &appsv1.Deployment{}
+		desiredReplicas = int32(0)
+	)
+
+	if err := c.Get(ctx, client.ObjectKey{Namespace: namespace, Name: name}, deployment); err != nil {
+		return retry.SevereError(err)
+	}
+
+	if deployment.Spec.Replicas != nil {
+		desiredReplicas = *deployment.Spec.Replicas
+	}
+
+	if deployment.Generation != deployment.Status.ObservedGeneration {
+		return retry.MinorError(fmt.Errorf("%q not observed at latest generation (%d/%d)", name,
+			deployment.Status.ObservedGeneration, deployment.Generation))
+	}
+
+	if deployment.Status.Replicas == desiredReplicas && deployment.Status.UpdatedReplicas == desiredReplicas && deployment.Status.AvailableReplicas == desiredReplicas {
+		return retry.Ok()
+	}
+
+	return retry.MinorError(fmt.Errorf("deployment %q currently has Updated/Available: %d/%d replicas. Desired: %d", name, deployment.Status.UpdatedReplicas, deployment.Status.AvailableReplicas, desiredReplicas))
+}
+
+// WaitUntilDeploymentRolloutIsComplete waits for the number of updated &
+// available replicas to be equal to the deployment's desired replicas count.
+// It keeps retrying until the timeout is reached.
+func WaitUntilDeploymentRolloutIsComplete(ctx context.Context, client client.Client, namespace string, name string, interval, timeout time.Duration) error {
+	return retry.UntilTimeout(ctx, interval, timeout, func(ctx context.Context) (done bool, err error) {
+		return HasDeploymentRolloutCompleted(ctx, client, namespace, name)
+	})
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/client/kubernetes/manifestoptions.go b/vendor/github.com/gardener/gardener/pkg/client/kubernetes/manifestoptions.go
new file mode 100644
index 000000000..0dc9affd7
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/client/kubernetes/manifestoptions.go
@@ -0,0 +1,27 @@
+// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package kubernetes
+
+// DeleteManifestOption is some configuration that modifies options for a delete request.
+type DeleteManifestOption interface {
+	// MutateDeleteManifestOptions applies this configuration to the given delete manifest options.
+	MutateDeleteManifestOptions(opts *DeleteManifestOptions)
+}
+
+// DeleteManifestOptions contains options for delete manifest requests.
+type DeleteManifestOptions struct {
+	// TolerateErrorFuncs are functions for which errors are tolerated.
+ TolerateErrorFuncs []TolerateErrorFunc +} diff --git a/vendor/github.com/gardener/gardener/pkg/client/kubernetes/options.go b/vendor/github.com/gardener/gardener/pkg/client/kubernetes/options.go new file mode 100644 index 000000000..842aacb3a --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/client/kubernetes/options.go @@ -0,0 +1,101 @@ +// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kubernetes + +import ( + "errors" + "time" + + "k8s.io/client-go/rest" + baseconfig "k8s.io/component-base/config" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// Config carries options for new ClientSets. +type Config struct { + clientOptions client.Options + restConfig *rest.Config + cacheResync *time.Duration + disableCache bool + uncachedObjects []client.Object +} + +// NewConfig returns a new Config with an empty REST config to allow testing ConfigFuncs without exporting +// the fields of the Config type. +func NewConfig() *Config { + return &Config{restConfig: &rest.Config{}} +} + +// ConfigFunc is a function that mutates a Config struct. +// It implements the functional options pattern. See +// https://github.com/tmrts/go-patterns/blob/master/idiom/functional-options.md. +type ConfigFunc func(config *Config) error + +// WithRESTConfig returns a ConfigFunc that sets the passed rest.Config on the Config object. +func WithRESTConfig(restConfig *rest.Config) ConfigFunc { + return func(config *Config) error { + config.restConfig = restConfig + return nil + } +} + +// WithClientConnectionOptions returns a ConfigFunc that transfers settings from +// the passed ClientConnectionConfiguration. +// The kubeconfig location in ClientConnectionConfiguration is disregarded, though! +func WithClientConnectionOptions(cfg baseconfig.ClientConnectionConfiguration) ConfigFunc { + return func(config *Config) error { + if config.restConfig == nil { + return errors.New("REST config must be set before setting connection options") + } + config.restConfig.Burst = int(cfg.Burst) + config.restConfig.QPS = cfg.QPS + config.restConfig.AcceptContentTypes = cfg.AcceptContentTypes + config.restConfig.ContentType = cfg.ContentType + return nil + } +} + +// WithClientOptions returns a ConfigFunc that sets the passed Options on the Config object. +func WithClientOptions(opt client.Options) ConfigFunc { + return func(config *Config) error { + config.clientOptions = opt + return nil + } +} + +// WithCacheResyncPeriod returns a ConfigFunc that set the client's cache's resync period to the given duration. +func WithCacheResyncPeriod(resync time.Duration) ConfigFunc { + return func(config *Config) error { + config.cacheResync = &resync + return nil + } +} + +// WithDisabledCachedClient disables the cache in the controller-runtime client, so Client() will be equivalent to +// DirectClient(). 
+func WithDisabledCachedClient() ConfigFunc { + return func(config *Config) error { + config.disableCache = true + return nil + } +} + +// WithUncached disables the cached client for the specified objects' GroupKinds. +func WithUncached(objs ...client.Object) ConfigFunc { + return func(config *Config) error { + config.uncachedObjects = append(config.uncachedObjects, objs...) + return nil + } +} diff --git a/vendor/github.com/gardener/gardener/pkg/client/kubernetes/pods.go b/vendor/github.com/gardener/gardener/pkg/client/kubernetes/pods.go new file mode 100644 index 000000000..f329bf754 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/client/kubernetes/pods.go @@ -0,0 +1,171 @@ +// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kubernetes + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "strings" + "time" + + "github.com/gardener/gardener/pkg/utils" + + corev1 "k8s.io/api/core/v1" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/portforward" + "k8s.io/client-go/tools/remotecommand" + "k8s.io/client-go/transport/spdy" +) + +// NewPodExecutor returns a podExecutor +func NewPodExecutor(config *rest.Config) PodExecutor { + return &podExecutor{ + config: config, + } +} + +// PodExecutor is the pod executor interface +type PodExecutor interface { + Execute(namespace, name, containerName, command, commandArg string) (io.Reader, error) +} + +type podExecutor struct { + config *rest.Config +} + +// Execute executes a command on a pod +func (p *podExecutor) Execute(namespace, name, containerName, command, commandArg string) (io.Reader, error) { + client, err := corev1client.NewForConfig(p.config) + if err != nil { + return nil, err + } + + var stdout, stderr bytes.Buffer + request := client.RESTClient(). + Post(). + Resource("pods"). + Name(name). + Namespace(namespace). + SubResource("exec"). + Param("container", containerName). + Param("command", command). + Param("stdin", "true"). + Param("stdout", "true"). + Param("stderr", "true"). + Param("tty", "false") + + executor, err := remotecommand.NewSPDYExecutor(p.config, http.MethodPost, request.URL()) + if err != nil { + return nil, fmt.Errorf("failed to initialized the command exector: %v", err) + } + + err = executor.Stream(remotecommand.StreamOptions{ + Stdin: strings.NewReader(commandArg), + Stdout: &stdout, + Stderr: &stderr, + Tty: false, + }) + if err != nil { + return &stderr, err + } + + return &stdout, nil +} + +// GetPodLogs retrieves the pod logs of the pod of the given name with the given options. 
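// Illustrative usage sketch (editorial addition, not taken from the vendored file):
// running a command inside a pod container with the PodExecutor. The namespace, pod,
// container and command are hypothetical; the second-to-last argument is the command
// that is exec'ed, the last one is written to its stdin.

package example

import (
	"io/ioutil"

	"github.com/gardener/gardener/pkg/client/kubernetes"
	"k8s.io/client-go/rest"
)

func podHostname(config *rest.Config) (string, error) {
	executor := kubernetes.NewPodExecutor(config)

	out, err := executor.Execute("shoot--foo--bar", "etcd-main-0", "etcd", "/bin/sh", "cat /etc/hostname")
	if err != nil {
		return "", err
	}

	data, err := ioutil.ReadAll(out)
	return string(data), err
}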
+func GetPodLogs(ctx context.Context, podInterface corev1client.PodInterface, name string, options *corev1.PodLogOptions) ([]byte, error) { + request := podInterface.GetLogs(name, options) + + stream, err := request.Stream(ctx) + if err != nil { + return nil, err + } + defer func() { utilruntime.HandleError(stream.Close()) }() + + return ioutil.ReadAll(stream) +} + +// ForwardPodPort tries to forward the port of the pod with name in namespace to +// the port. If equals zero, a free port will be chosen randomly. +// It returns the stop channel which must be closed when the port forward connection should be terminated. +func (c *clientSet) ForwardPodPort(namespace, name string, local, remote int) (chan struct{}, error) { + fw, stopChan, err := c.setupForwardPodPort(namespace, name, local, remote) + if err != nil { + return nil, err + } + return stopChan, fw.ForwardPorts() +} + +// CheckForwardPodPort tries to forward the port of the pod with name in namespace to +// the port. If equals zero, a free port will be chosen randomly. +// It returns true if the port forward connection has been established successfully or false otherwise. +func (c *clientSet) CheckForwardPodPort(namespace, name string, local, remote int) error { + fw, stopChan, err := c.setupForwardPodPort(namespace, name, local, remote) + if err != nil { + return fmt.Errorf("could not setup pod port forwarding: %v", err) + } + + errChan := make(chan error) + go func() { + errChan <- fw.ForwardPorts() + }() + defer close(stopChan) + + select { + case err = <-errChan: + return fmt.Errorf("error forwarding ports: %v", err) + case <-fw.Ready: + return nil + case <-time.After(time.Second * 5): + return errors.New("port forward connection could not be established within five seconds") + } +} + +func (c *clientSet) setupForwardPodPort(namespace, name string, local, remote int) (*portforward.PortForwarder, chan struct{}, error) { + var ( + stopChan = make(chan struct{}, 1) + readyChan = make(chan struct{}, 1) + out = ioutil.Discard + localPort int + ) + + u := c.kubernetes.CoreV1().RESTClient().Post().Resource("pods").Namespace(namespace).Name(name).SubResource("portforward").URL() + + transport, upgrader, err := spdy.RoundTripperFor(c.config) + if err != nil { + return nil, nil, err + } + dialer := spdy.NewDialer(upgrader, &http.Client{Transport: transport}, "POST", u) + + if local == 0 { + localPort, err = utils.FindFreePort() + if err != nil { + return nil, nil, err + } + } + + fw, err := portforward.New(dialer, []string{fmt.Sprintf("%d:%d", localPort, remote)}, stopChan, readyChan, out, out) + if err != nil { + return nil, nil, err + } + return fw, stopChan, nil +} diff --git a/vendor/github.com/gardener/gardener/pkg/client/kubernetes/runtime_client.go b/vendor/github.com/gardener/gardener/pkg/client/kubernetes/runtime_client.go new file mode 100644 index 000000000..255961dff --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/client/kubernetes/runtime_client.go @@ -0,0 +1,157 @@ +// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kubernetes + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/gardener/gardener/pkg/logger" + + "github.com/sirupsen/logrus" + "golang.org/x/time/rate" + corev1 "k8s.io/api/core/v1" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + "sigs.k8s.io/controller-runtime/pkg/manager" +) + +const ( + defaultCacheResyncPeriod = 6 * time.Hour +) + +// NewDirectClient creates a new client.Client which can be used to talk to the API directly (without a cache). +func NewDirectClient(config *rest.Config, options client.Options) (client.Client, error) { + if err := setClientOptionsDefaults(config, &options); err != nil { + return nil, err + } + + return client.New(config, options) +} + +// NewRuntimeClientWithCache creates a new client.client with the given config and options. +// The client uses a new cache, which will be started immediately using the given context. +func NewRuntimeClientWithCache(ctx context.Context, config *rest.Config, options client.Options, uncachedObjects ...client.Object) (client.Client, error) { + if err := setClientOptionsDefaults(config, &options); err != nil { + return nil, err + } + + clientCache, err := NewRuntimeCache(config, cache.Options{ + Scheme: options.Scheme, + Mapper: options.Mapper, + }) + if err != nil { + return nil, fmt.Errorf("could not create new client cache: %w", err) + } + + runtimeClient, err := newRuntimeClientWithCache(config, options, clientCache, uncachedObjects...) + if err != nil { + return nil, err + } + + go func() { + if err := clientCache.Start(ctx); err != nil { + logger.NewLogger(fmt.Sprint(logrus.ErrorLevel)).Errorf("cache.Start returned error, which should never happen, ignoring.") + } + }() + + clientCache.WaitForCacheSync(ctx) + + return runtimeClient, nil +} + +func newRuntimeClientWithCache(config *rest.Config, options client.Options, cache cache.Cache, uncachedObjects ...client.Object) (client.Client, error) { + return manager.NewClientBuilder().WithUncached(uncachedObjects...).Build(cache, config, options) +} + +func setClientOptionsDefaults(config *rest.Config, options *client.Options) error { + if options.Mapper == nil { + // default the client's REST mapper to a dynamic REST mapper (automatically rediscovers resources on NoMatchErrors) + mapper, err := apiutil.NewDynamicRESTMapper( + config, + apiutil.WithLazyDiscovery, + apiutil.WithLimiter(rate.NewLimiter(rate.Every(5*time.Second), 1)), + ) + if err != nil { + return fmt.Errorf("failed to create new DynamicRESTMapper: %w", err) + } + options.Mapper = mapper + } + + return nil +} + +// NewRuntimeCache creates a new cache.Cache with the given config and options. It can be used +// for creating new controller-runtime clients with caches. 
+func NewRuntimeCache(config *rest.Config, options cache.Options) (cache.Cache, error) { + if err := setCacheOptionsDefaults(&options); err != nil { + return nil, err + } + + return cache.New(config, options) +} + +func setCacheOptionsDefaults(options *cache.Options) error { + if options.Resync == nil { + resync := defaultCacheResyncPeriod + options.Resync = &resync + } + + return nil +} + +// NewDirectClientFromSecret creates a new controller runtime Client struct for a given secret. +func NewDirectClientFromSecret(secret *corev1.Secret, fns ...ConfigFunc) (client.Client, error) { + if kubeconfig, ok := secret.Data[KubeConfig]; ok { + return NewDirectClientFromBytes(kubeconfig, fns...) + } + return nil, errors.New("no valid kubeconfig found") +} + +// NewDirectClientFromBytes creates a new controller runtime Client struct for a given kubeconfig byte slice. +func NewDirectClientFromBytes(kubeconfig []byte, fns ...ConfigFunc) (client.Client, error) { + clientConfig, err := clientcmd.NewClientConfigFromBytes(kubeconfig) + if err != nil { + return nil, err + } + + if err := validateClientConfig(clientConfig); err != nil { + return nil, err + } + + config, err := clientConfig.ClientConfig() + if err != nil { + return nil, err + } + + opts := append([]ConfigFunc{WithRESTConfig(config)}, fns...) + return NewDirectClientWithConfig(opts...) +} + +// NewDirectClientWithConfig returns a new controller runtime client from a config. +func NewDirectClientWithConfig(fns ...ConfigFunc) (client.Client, error) { + conf := &Config{} + for _, f := range fns { + if err := f(conf); err != nil { + return nil, err + } + } + return NewDirectClient(conf.restConfig, conf.clientOptions) +} diff --git a/vendor/github.com/gardener/gardener/pkg/client/kubernetes/scaling.go b/vendor/github.com/gardener/gardener/pkg/client/kubernetes/scaling.go new file mode 100644 index 000000000..2bd159921 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/client/kubernetes/scaling.go @@ -0,0 +1,99 @@ +// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kubernetes + +import ( + "context" + "fmt" + "time" + + druidv1alpha1 "github.com/gardener/etcd-druid/api/v1alpha1" + "github.com/gardener/gardener/pkg/utils/retry" + + appsv1 "k8s.io/api/apps/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// ScaleStatefulSet scales a StatefulSet. +func ScaleStatefulSet(ctx context.Context, c client.Client, key client.ObjectKey, replicas int32) error { + statefulset := &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: key.Name, + Namespace: key.Namespace, + }, + } + + return scaleResource(ctx, c, statefulset, replicas) +} + +// ScaleEtcd scales a Etcd resource. 
+func ScaleEtcd(ctx context.Context, c client.Client, key client.ObjectKey, replicas int) error { + etcd := &druidv1alpha1.Etcd{ + ObjectMeta: metav1.ObjectMeta{ + Name: key.Name, + Namespace: key.Namespace, + }, + } + + return scaleResource(ctx, c, etcd, int32(replicas)) +} + +// ScaleDeployment scales a Deployment. +func ScaleDeployment(ctx context.Context, c client.Client, key client.ObjectKey, replicas int32) error { + deployment := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: key.Name, + Namespace: key.Namespace, + }, + } + + return scaleResource(ctx, c, deployment, replicas) +} + +// scaleResource scales resource's 'spec.replicas' to replicas count +func scaleResource(ctx context.Context, c client.Client, obj client.Object, replicas int32) error { + patch := []byte(fmt.Sprintf(`{"spec":{"replicas":%d}}`, replicas)) + + // TODO: replace this with call to scale subresource once controller-runtime supports it + // see: https://github.com/kubernetes-sigs/controller-runtime/issues/172 + return c.Patch(ctx, obj, client.RawPatch(types.MergePatchType, patch)) +} + +// WaitUntilDeploymentScaledToDesiredReplicas waits for the number of available replicas to be equal to the deployment's desired replicas count. +func WaitUntilDeploymentScaledToDesiredReplicas(ctx context.Context, client client.Client, key types.NamespacedName, desiredReplicas int32) error { + return retry.UntilTimeout(ctx, 5*time.Second, 300*time.Second, func(ctx context.Context) (done bool, err error) { + deployment := &appsv1.Deployment{} + if err := client.Get(ctx, key, deployment); err != nil { + return retry.SevereError(err) + } + + if deployment.Generation != deployment.Status.ObservedGeneration { + return retry.MinorError(fmt.Errorf("%q not observed at latest generation (%d/%d)", key.Name, + deployment.Status.ObservedGeneration, deployment.Generation)) + } + + if deployment.Spec.Replicas == nil || *deployment.Spec.Replicas != desiredReplicas { + return retry.SevereError(fmt.Errorf("waiting for deployment %q to scale failed. spec.replicas does not match the desired replicas", key.Name)) + } + + if deployment.Status.Replicas == desiredReplicas && deployment.Status.AvailableReplicas == desiredReplicas { + return retry.Ok() + } + + return retry.MinorError(fmt.Errorf("deployment %q currently has '%d' replicas. Desired: %d", key.Name, deployment.Status.AvailableReplicas, desiredReplicas)) + }) +} diff --git a/vendor/github.com/gardener/gardener/pkg/client/kubernetes/types.go b/vendor/github.com/gardener/gardener/pkg/client/kubernetes/types.go new file mode 100644 index 000000000..d1c6b3798 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/client/kubernetes/types.go @@ -0,0 +1,183 @@ +// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
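The scaling helpers above issue a JSON merge patch on `.spec.replicas`, and WaitUntilDeploymentScaledToDesiredReplicas then polls every five seconds for up to five minutes until the Deployment's status reflects the desired count. A small sketch of the intended flow, assuming an existing controller-runtime client `c`; the namespace and object names are made up:

    package main

    import (
        "context"

        kutil "github.com/gardener/gardener/pkg/client/kubernetes"
        "sigs.k8s.io/controller-runtime/pkg/client"
    )

    // scaleDownAndWait scales a Deployment to zero replicas and blocks until the
    // change has been observed and rolled out.
    func scaleDownAndWait(ctx context.Context, c client.Client) error {
        key := client.ObjectKey{Namespace: "shoot--foo--bar", Name: "kube-controller-manager"}

        if err := kutil.ScaleDeployment(ctx, c, key, 0); err != nil {
            return err
        }
        // Etcd resources managed by etcd-druid are scaled with the same merge-patch
        // mechanism, e.g.:
        //   kutil.ScaleEtcd(ctx, c, client.ObjectKey{Namespace: "shoot--foo--bar", Name: "etcd-main"}, 1)
        return kutil.WaitUntilDeploymentScaledToDesiredReplicas(ctx, c, key, 0)
    }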
+ +package kubernetes + +import ( + "context" + + "github.com/gardener/gardener/pkg/chartrenderer" + gardencoreclientset "github.com/gardener/gardener/pkg/client/core/clientset/versioned" + gardencorescheme "github.com/gardener/gardener/pkg/client/core/clientset/versioned/scheme" + gardenextensionsscheme "github.com/gardener/gardener/pkg/client/extensions/clientset/versioned/scheme" + gardenseedmanagementclientset "github.com/gardener/gardener/pkg/client/seedmanagement/clientset/versioned" + gardenseedmanagementscheme "github.com/gardener/gardener/pkg/client/seedmanagement/clientset/versioned/scheme" + + druidv1alpha1 "github.com/gardener/etcd-druid/api/v1alpha1" + dnsv1alpha1 "github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1" + resourcesscheme "github.com/gardener/gardener-resource-manager/pkg/apis/resources/v1alpha1" + hvpav1alpha1 "github.com/gardener/hvpa-controller/api/v1alpha1" + istionetworkingv1beta1 "istio.io/client-go/pkg/apis/networking/v1beta1" + apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" + apiextensionsscheme "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/runtime/serializer/json" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/version" + autoscalingscheme "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta2" + kubernetesclientset "k8s.io/client-go/kubernetes" + corescheme "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + apiregistrationclientset "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset" + apiregistrationscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var ( + // GardenScheme is the scheme used in the Garden cluster. + GardenScheme = runtime.NewScheme() + // SeedScheme is the scheme used in the Seed cluster. + SeedScheme = runtime.NewScheme() + // ShootScheme is the scheme used in the Shoot cluster. + ShootScheme = runtime.NewScheme() + // PlantScheme is the scheme used in the Plant cluster + PlantScheme = runtime.NewScheme() + + // DefaultDeleteOptions use foreground propagation policy and grace period of 60 seconds. + DefaultDeleteOptions = []client.DeleteOption{ + client.PropagationPolicy(metav1.DeletePropagationForeground), + client.GracePeriodSeconds(60), + } + // ForceDeleteOptions use background propagation policy and grace period of 0 seconds. + ForceDeleteOptions = []client.DeleteOption{ + client.PropagationPolicy(metav1.DeletePropagationBackground), + client.GracePeriodSeconds(0), + } + + // SeedSerializer is a YAML serializer using the Seed scheme. + SeedSerializer = json.NewSerializerWithOptions(json.DefaultMetaFactory, SeedScheme, SeedScheme, json.SerializerOptions{Yaml: true, Pretty: false, Strict: false}) + // SeedCodec is a codec factory using the Seed scheme. + SeedCodec = serializer.NewCodecFactory(SeedScheme) + + // ShootSerializer is a YAML serializer using the Shoot scheme. + ShootSerializer = json.NewSerializerWithOptions(json.DefaultMetaFactory, ShootScheme, ShootScheme, json.SerializerOptions{Yaml: true, Pretty: false, Strict: false}) + // ShootCodec is a codec factory using the Shoot scheme. 
+ ShootCodec = serializer.NewCodecFactory(ShootScheme) +) + +// DefaultGetOptions are the default options for GET requests. +func DefaultGetOptions() metav1.GetOptions { return metav1.GetOptions{} } + +// DefaultCreateOptions are the default options for CREATE requests. +func DefaultCreateOptions() metav1.CreateOptions { return metav1.CreateOptions{} } + +// DefaultUpdateOptions are the default options for UPDATE requests. +func DefaultUpdateOptions() metav1.UpdateOptions { return metav1.UpdateOptions{} } + +func init() { + gardenSchemeBuilder := runtime.NewSchemeBuilder( + corescheme.AddToScheme, + gardencorescheme.AddToScheme, + gardenseedmanagementscheme.AddToScheme, + ) + utilruntime.Must(gardenSchemeBuilder.AddToScheme(GardenScheme)) + + seedSchemeBuilder := runtime.NewSchemeBuilder( + corescheme.AddToScheme, + dnsv1alpha1.AddToScheme, + gardenextensionsscheme.AddToScheme, + resourcesscheme.AddToScheme, + autoscalingscheme.AddToScheme, + hvpav1alpha1.AddToScheme, + druidv1alpha1.AddToScheme, + apiextensionsscheme.AddToScheme, + istionetworkingv1beta1.AddToScheme, + ) + utilruntime.Must(seedSchemeBuilder.AddToScheme(SeedScheme)) + + shootSchemeBuilder := runtime.NewSchemeBuilder( + corescheme.AddToScheme, + apiextensionsscheme.AddToScheme, + apiregistrationscheme.AddToScheme, + autoscalingscheme.AddToScheme, + ) + utilruntime.Must(shootSchemeBuilder.AddToScheme(ShootScheme)) + + plantSchemeBuilder := runtime.NewSchemeBuilder( + corescheme.AddToScheme, + gardencorescheme.AddToScheme, + ) + utilruntime.Must(plantSchemeBuilder.AddToScheme(PlantScheme)) +} + +// MergeFunc determines how oldOj is merged into new oldObj. +type MergeFunc func(newObj, oldObj *unstructured.Unstructured) + +// Applier is an interface which describes declarative operations to apply multiple +// Kubernetes objects. +type Applier interface { + ApplyManifest(ctx context.Context, unstructured UnstructuredReader, options MergeFuncs) error + DeleteManifest(ctx context.Context, unstructured UnstructuredReader, opts ...DeleteManifestOption) error +} + +// Interface is used to wrap the interactions with a Kubernetes cluster +// (which are performed with the help of kubernetes/client-go) in order to allow the implementation +// of several Kubernetes versions. +type Interface interface { + RESTConfig() *rest.Config + RESTClient() rest.Interface + + // Client returns the ClientSet's controller-runtime client. This client should be used by default, as it carries + // a cache, which uses SharedIndexInformers to keep up-to-date. + Client() client.Client + // DirectClient returns a controller-runtime client, which can be used to talk to the API server directly + // (without using a cache). + DirectClient() client.Client + // Cache returns the ClientSet's controller-runtime cache. It can be used to get Informers for arbitrary objects. + Cache() cache.Cache + + // Applier returns an Applier which uses the ClientSet's client. + Applier() Applier + // ChartRenderer returns a ChartRenderer populated with the cluster's Capabilities. + ChartRenderer() chartrenderer.Interface + // ChartApplier returns a ChartApplier using the ClientSet's ChartRenderer and Applier. + ChartApplier() ChartApplier + + Kubernetes() kubernetesclientset.Interface + GardenCore() gardencoreclientset.Interface + GardenSeedManagement() gardenseedmanagementclientset.Interface + APIExtension() apiextensionsclientset.Interface + APIRegistration() apiregistrationclientset.Interface + + // Deprecated: Use `Client()` and utils instead. 
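DefaultDeleteOptions and ForceDeleteOptions are plain slices of controller-runtime delete options, so they can be spread directly into a Delete call. A sketch under that assumption; the client and the Deployment are placeholders:

    package main

    import (
        "context"

        kutil "github.com/gardener/gardener/pkg/client/kubernetes"
        appsv1 "k8s.io/api/apps/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "sigs.k8s.io/controller-runtime/pkg/client"
    )

    // deleteWithDefaults deletes an object with foreground propagation and a 60s
    // grace period; swapping in kutil.ForceDeleteOptions would use background
    // propagation with grace period 0 instead.
    func deleteWithDefaults(ctx context.Context, c client.Client) error {
        deploy := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Namespace: "garden", Name: "example"}}
        return c.Delete(ctx, deploy, kutil.DefaultDeleteOptions...)
    }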
+ ForwardPodPort(string, string, int, int) (chan struct{}, error) + CheckForwardPodPort(string, string, int, int) error + + // Version returns the server version of the targeted Kubernetes cluster. + Version() string + // DiscoverVersion tries to retrieve the server version of the targeted Kubernetes cluster and updates the + // ClientSet's saved version accordingly. Use Version if you only want to retrieve the kubernetes version instead + // of refreshing the ClientSet's saved version. + DiscoverVersion() (*version.Info, error) + + // Start starts the cache of the ClientSet's controller-runtime client and returns immediately. + // It must be called first before using the client to retrieve objects from the API server. + Start(ctx context.Context) + // WaitForCacheSync waits for the cache of the ClientSet's controller-runtime client to be synced. + WaitForCacheSync(ctx context.Context) bool +} diff --git a/vendor/github.com/gardener/gardener/pkg/client/seedmanagement/clientset/versioned/clientset.go b/vendor/github.com/gardener/gardener/pkg/client/seedmanagement/clientset/versioned/clientset.go new file mode 100644 index 000000000..dbfbaa5a8 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/client/seedmanagement/clientset/versioned/clientset.go @@ -0,0 +1,97 @@ +/* +Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package versioned + +import ( + "fmt" + + seedmanagementv1alpha1 "github.com/gardener/gardener/pkg/client/seedmanagement/clientset/versioned/typed/seedmanagement/v1alpha1" + discovery "k8s.io/client-go/discovery" + rest "k8s.io/client-go/rest" + flowcontrol "k8s.io/client-go/util/flowcontrol" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + SeedmanagementV1alpha1() seedmanagementv1alpha1.SeedmanagementV1alpha1Interface +} + +// Clientset contains the clients for groups. Each group has exactly one +// version included in a Clientset. +type Clientset struct { + *discovery.DiscoveryClient + seedmanagementV1alpha1 *seedmanagementv1alpha1.SeedmanagementV1alpha1Client +} + +// SeedmanagementV1alpha1 retrieves the SeedmanagementV1alpha1Client +func (c *Clientset) SeedmanagementV1alpha1() seedmanagementv1alpha1.SeedmanagementV1alpha1Interface { + return c.seedmanagementV1alpha1 +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + if c == nil { + return nil + } + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfig will generate a rate-limiter in configShallowCopy. 
+func NewForConfig(c *rest.Config) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + if configShallowCopy.Burst <= 0 { + return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") + } + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + var cs Clientset + var err error + cs.seedmanagementV1alpha1, err = seedmanagementv1alpha1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + + cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + return &cs, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *Clientset { + var cs Clientset + cs.seedmanagementV1alpha1 = seedmanagementv1alpha1.NewForConfigOrDie(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) + return &cs +} + +// New creates a new Clientset for the given RESTClient. +func New(c rest.Interface) *Clientset { + var cs Clientset + cs.seedmanagementV1alpha1 = seedmanagementv1alpha1.New(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &cs +} diff --git a/vendor/github.com/gardener/gardener/pkg/client/seedmanagement/clientset/versioned/doc.go b/vendor/github.com/gardener/gardener/pkg/client/seedmanagement/clientset/versioned/doc.go new file mode 100644 index 000000000..32d852285 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/client/seedmanagement/clientset/versioned/doc.go @@ -0,0 +1,20 @@ +/* +Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated clientset. +package versioned diff --git a/vendor/github.com/gardener/gardener/pkg/client/seedmanagement/clientset/versioned/scheme/doc.go b/vendor/github.com/gardener/gardener/pkg/client/seedmanagement/clientset/versioned/scheme/doc.go new file mode 100644 index 000000000..7d4fb776b --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/client/seedmanagement/clientset/versioned/scheme/doc.go @@ -0,0 +1,20 @@ +/* +Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package contains the scheme of the automatically generated clientset. +package scheme diff --git a/vendor/github.com/gardener/gardener/pkg/client/seedmanagement/clientset/versioned/scheme/register.go b/vendor/github.com/gardener/gardener/pkg/client/seedmanagement/clientset/versioned/scheme/register.go new file mode 100644 index 000000000..cc1a9dd2d --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/client/seedmanagement/clientset/versioned/scheme/register.go @@ -0,0 +1,56 @@ +/* +Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package scheme + +import ( + seedmanagementv1alpha1 "github.com/gardener/gardener/pkg/apis/seedmanagement/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var Scheme = runtime.NewScheme() +var Codecs = serializer.NewCodecFactory(Scheme) +var ParameterCodec = runtime.NewParameterCodec(Scheme) +var localSchemeBuilder = runtime.SchemeBuilder{ + seedmanagementv1alpha1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. 
+var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(Scheme)) +} diff --git a/vendor/github.com/gardener/gardener/pkg/client/seedmanagement/clientset/versioned/typed/seedmanagement/v1alpha1/doc.go b/vendor/github.com/gardener/gardener/pkg/client/seedmanagement/clientset/versioned/typed/seedmanagement/v1alpha1/doc.go new file mode 100644 index 000000000..828c8ebe1 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/client/seedmanagement/clientset/versioned/typed/seedmanagement/v1alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1alpha1 diff --git a/vendor/github.com/gardener/gardener/pkg/client/seedmanagement/clientset/versioned/typed/seedmanagement/v1alpha1/generated_expansion.go b/vendor/github.com/gardener/gardener/pkg/client/seedmanagement/clientset/versioned/typed/seedmanagement/v1alpha1/generated_expansion.go new file mode 100644 index 000000000..9de4c2bce --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/client/seedmanagement/clientset/versioned/typed/seedmanagement/v1alpha1/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +type ManagedSeedExpansion interface{} diff --git a/vendor/github.com/gardener/gardener/pkg/client/seedmanagement/clientset/versioned/typed/seedmanagement/v1alpha1/managedseed.go b/vendor/github.com/gardener/gardener/pkg/client/seedmanagement/clientset/versioned/typed/seedmanagement/v1alpha1/managedseed.go new file mode 100644 index 000000000..0c8039896 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/client/seedmanagement/clientset/versioned/typed/seedmanagement/v1alpha1/managedseed.go @@ -0,0 +1,195 @@ +/* +Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 
2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + "time" + + v1alpha1 "github.com/gardener/gardener/pkg/apis/seedmanagement/v1alpha1" + scheme "github.com/gardener/gardener/pkg/client/seedmanagement/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// ManagedSeedsGetter has a method to return a ManagedSeedInterface. +// A group's client should implement this interface. +type ManagedSeedsGetter interface { + ManagedSeeds(namespace string) ManagedSeedInterface +} + +// ManagedSeedInterface has methods to work with ManagedSeed resources. +type ManagedSeedInterface interface { + Create(ctx context.Context, managedSeed *v1alpha1.ManagedSeed, opts v1.CreateOptions) (*v1alpha1.ManagedSeed, error) + Update(ctx context.Context, managedSeed *v1alpha1.ManagedSeed, opts v1.UpdateOptions) (*v1alpha1.ManagedSeed, error) + UpdateStatus(ctx context.Context, managedSeed *v1alpha1.ManagedSeed, opts v1.UpdateOptions) (*v1alpha1.ManagedSeed, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ManagedSeed, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ManagedSeedList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ManagedSeed, err error) + ManagedSeedExpansion +} + +// managedSeeds implements ManagedSeedInterface +type managedSeeds struct { + client rest.Interface + ns string +} + +// newManagedSeeds returns a ManagedSeeds +func newManagedSeeds(c *SeedmanagementV1alpha1Client, namespace string) *managedSeeds { + return &managedSeeds{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the managedSeed, and returns the corresponding managedSeed object, and an error if there is any. +func (c *managedSeeds) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ManagedSeed, err error) { + result = &v1alpha1.ManagedSeed{} + err = c.client.Get(). + Namespace(c.ns). + Resource("managedseeds"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ManagedSeeds that match those selectors. +func (c *managedSeeds) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ManagedSeedList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.ManagedSeedList{} + err = c.client.Get(). + Namespace(c.ns). 
+ Resource("managedseeds"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested managedSeeds. +func (c *managedSeeds) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("managedseeds"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a managedSeed and creates it. Returns the server's representation of the managedSeed, and an error, if there is any. +func (c *managedSeeds) Create(ctx context.Context, managedSeed *v1alpha1.ManagedSeed, opts v1.CreateOptions) (result *v1alpha1.ManagedSeed, err error) { + result = &v1alpha1.ManagedSeed{} + err = c.client.Post(). + Namespace(c.ns). + Resource("managedseeds"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(managedSeed). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a managedSeed and updates it. Returns the server's representation of the managedSeed, and an error, if there is any. +func (c *managedSeeds) Update(ctx context.Context, managedSeed *v1alpha1.ManagedSeed, opts v1.UpdateOptions) (result *v1alpha1.ManagedSeed, err error) { + result = &v1alpha1.ManagedSeed{} + err = c.client.Put(). + Namespace(c.ns). + Resource("managedseeds"). + Name(managedSeed.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(managedSeed). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *managedSeeds) UpdateStatus(ctx context.Context, managedSeed *v1alpha1.ManagedSeed, opts v1.UpdateOptions) (result *v1alpha1.ManagedSeed, err error) { + result = &v1alpha1.ManagedSeed{} + err = c.client.Put(). + Namespace(c.ns). + Resource("managedseeds"). + Name(managedSeed.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(managedSeed). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the managedSeed and deletes it. Returns an error if one occurs. +func (c *managedSeeds) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("managedseeds"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *managedSeeds) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("managedseeds"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched managedSeed. +func (c *managedSeeds) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ManagedSeed, err error) { + result = &v1alpha1.ManagedSeed{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("managedseeds"). + Name(name). + SubResource(subresources...). 
+ VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/gardener/gardener/pkg/client/seedmanagement/clientset/versioned/typed/seedmanagement/v1alpha1/seedmanagement_client.go b/vendor/github.com/gardener/gardener/pkg/client/seedmanagement/clientset/versioned/typed/seedmanagement/v1alpha1/seedmanagement_client.go new file mode 100644 index 000000000..d7e7ee7ca --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/client/seedmanagement/clientset/versioned/typed/seedmanagement/v1alpha1/seedmanagement_client.go @@ -0,0 +1,89 @@ +/* +Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "github.com/gardener/gardener/pkg/apis/seedmanagement/v1alpha1" + "github.com/gardener/gardener/pkg/client/seedmanagement/clientset/versioned/scheme" + rest "k8s.io/client-go/rest" +) + +type SeedmanagementV1alpha1Interface interface { + RESTClient() rest.Interface + ManagedSeedsGetter +} + +// SeedmanagementV1alpha1Client is used to interact with features provided by the seedmanagement.gardener.cloud group. +type SeedmanagementV1alpha1Client struct { + restClient rest.Interface +} + +func (c *SeedmanagementV1alpha1Client) ManagedSeeds(namespace string) ManagedSeedInterface { + return newManagedSeeds(c, namespace) +} + +// NewForConfig creates a new SeedmanagementV1alpha1Client for the given config. +func NewForConfig(c *rest.Config) (*SeedmanagementV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &SeedmanagementV1alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new SeedmanagementV1alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *SeedmanagementV1alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new SeedmanagementV1alpha1Client for the given RESTClient. +func New(c rest.Interface) *SeedmanagementV1alpha1Client { + return &SeedmanagementV1alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *SeedmanagementV1alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/github.com/gardener/gardener/pkg/client/settings/clientset/versioned/scheme/doc.go b/vendor/github.com/gardener/gardener/pkg/client/settings/clientset/versioned/scheme/doc.go new file mode 100644 index 000000000..7d4fb776b --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/client/settings/clientset/versioned/scheme/doc.go @@ -0,0 +1,20 @@ +/* +Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package contains the scheme of the automatically generated clientset. +package scheme diff --git a/vendor/github.com/gardener/gardener/pkg/client/settings/clientset/versioned/scheme/register.go b/vendor/github.com/gardener/gardener/pkg/client/settings/clientset/versioned/scheme/register.go new file mode 100644 index 000000000..89313a4d1 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/client/settings/clientset/versioned/scheme/register.go @@ -0,0 +1,56 @@ +/* +Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package scheme + +import ( + settingsv1alpha1 "github.com/gardener/gardener/pkg/apis/settings/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var Scheme = runtime.NewScheme() +var Codecs = serializer.NewCodecFactory(Scheme) +var ParameterCodec = runtime.NewParameterCodec(Scheme) +var localSchemeBuilder = runtime.SchemeBuilder{ + settingsv1alpha1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. 
This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(Scheme)) +} diff --git a/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/doc.go b/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/doc.go new file mode 100644 index 000000000..3e61bbf75 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/doc.go @@ -0,0 +1,18 @@ +// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +k8s:deepcopy-gen=package +// +groupName=gardenlet.config.gardener.cloud + +package config // import "github.com/gardener/gardener/pkg/gardenlet/apis/config" diff --git a/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/helper/helpers.go b/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/helper/helpers.go new file mode 100644 index 000000000..e872e6cf4 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/helper/helpers.go @@ -0,0 +1,84 @@ +// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
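The generated seedmanagement clientset shown above is used like any other typed client-go clientset: build it from a rest.Config (note from NewForConfig that Burst must be positive whenever QPS is set and no rate limiter is configured), then reach the ManagedSeed resources through SeedmanagementV1alpha1(). A sketch; the "garden" namespace is an assumption for illustration only:

    package main

    import (
        "context"
        "fmt"

        seedmanagementclient "github.com/gardener/gardener/pkg/client/seedmanagement/clientset/versioned"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/rest"
    )

    // listManagedSeeds prints the names of all ManagedSeeds in the given namespace.
    func listManagedSeeds(ctx context.Context, restConfig *rest.Config) error {
        cs, err := seedmanagementclient.NewForConfig(restConfig)
        if err != nil {
            return err
        }

        managedSeeds, err := cs.SeedmanagementV1alpha1().ManagedSeeds("garden").List(ctx, metav1.ListOptions{})
        if err != nil {
            return err
        }
        for _, ms := range managedSeeds.Items {
            fmt.Println(ms.Name)
        }
        return nil
    }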
+ +package helper + +import ( + "fmt" + + gardencore "github.com/gardener/gardener/pkg/apis/core" + gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1" + "github.com/gardener/gardener/pkg/gardenlet/apis/config" + configv1alpha1 "github.com/gardener/gardener/pkg/gardenlet/apis/config/v1alpha1" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +// SeedNameFromSeedConfig returns an empty string if the given seed config is nil, or the +// name inside the seed config. +func SeedNameFromSeedConfig(seedConfig *config.SeedConfig) string { + if seedConfig == nil { + return "" + } + return seedConfig.SeedTemplate.Name +} + +// StaleExtensionHealthChecksThreshold returns nil if the given config is nil or the check +// for stale health checks is not enabled. Otherwise it returns the threshold from the given config. +func StaleExtensionHealthChecksThreshold(c *config.StaleExtensionHealthChecks) *metav1.Duration { + if c != nil && c.Enabled { + return c.Threshold + } + + return nil +} + +var scheme *runtime.Scheme + +func init() { + scheme = runtime.NewScheme() + // core schemes are needed here to properly convert the embedded SeedTemplate objects + utilruntime.Must(gardencore.AddToScheme(scheme)) + utilruntime.Must(gardencorev1beta1.AddToScheme(scheme)) + utilruntime.Must(config.AddToScheme(scheme)) + utilruntime.Must(configv1alpha1.AddToScheme(scheme)) +} + +// ConvertGardenletConfiguration converts the given object to an internal GardenletConfiguration version. +func ConvertGardenletConfiguration(obj runtime.Object) (*config.GardenletConfiguration, error) { + obj, err := scheme.ConvertToVersion(obj, config.SchemeGroupVersion) + if err != nil { + return nil, err + } + result, ok := obj.(*config.GardenletConfiguration) + if !ok { + return nil, fmt.Errorf("could not convert GardenletConfiguration to internal version") + } + return result, nil +} + +// ConvertGardenletConfigurationExternal converts the given object to an external GardenletConfiguration version. +func ConvertGardenletConfigurationExternal(obj runtime.Object) (*configv1alpha1.GardenletConfiguration, error) { + obj, err := scheme.ConvertToVersion(obj, configv1alpha1.SchemeGroupVersion) + if err != nil { + return nil, err + } + result, ok := obj.(*configv1alpha1.GardenletConfiguration) + if !ok { + return nil, fmt.Errorf("could not convert GardenletConfiguration to version %s", configv1alpha1.SchemeGroupVersion.String()) + } + return result, nil +} diff --git a/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/register.go b/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/register.go new file mode 100644 index 000000000..cc2edebe3 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/register.go @@ -0,0 +1,51 @@ +// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
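The helper package above converts between the internal and the v1alpha1 GardenletConfiguration through its own private scheme, so callers do not have to wire the conversions themselves. A sketch of reading the seed name from an externally-versioned configuration; the external object stands in for a decoded component config and the function name is illustrative:

    package example

    import (
        confighelper "github.com/gardener/gardener/pkg/gardenlet/apis/config/helper"
        configv1alpha1 "github.com/gardener/gardener/pkg/gardenlet/apis/config/v1alpha1"
    )

    // seedNameFromExternal converts an external configuration to the internal
    // version and reads the configured seed name ("" if no SeedConfig is set).
    func seedNameFromExternal(external *configv1alpha1.GardenletConfiguration) (string, error) {
        internal, err := confighelper.ConvertGardenletConfiguration(external)
        if err != nil {
            return "", err
        }
        return confighelper.SeedNameFromSeedConfig(internal.SeedConfig), nil
    }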
+// See the License for the specific language governing permissions and +// limitations under the License. + +package config + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "gardenlet.config.gardener.cloud" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder used to register the Shoot resource. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // AddToScheme is a pointer to SchemeBuilder.AddToScheme. + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &GardenletConfiguration{}, + ) + return nil +} diff --git a/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/types.go b/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/types.go new file mode 100644 index 000000000..4ee1dfd64 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/types.go @@ -0,0 +1,361 @@ +// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package config + +import ( + gardencore "github.com/gardener/gardener/pkg/apis/core" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + componentbaseconfig "k8s.io/component-base/config" + "k8s.io/klog" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// GardenletConfiguration defines the configuration for the Gardenlet. +type GardenletConfiguration struct { + metav1.TypeMeta + // GardenClientConnection specifies the kubeconfig file and the client connection settings + // for the proxy server to use when communicating with the garden apiserver. + GardenClientConnection *GardenClientConnection + // SeedClientConnection specifies the client connection settings for the proxy server + // to use when communicating with the seed apiserver. + SeedClientConnection *SeedClientConnection + // ShootClientConnection specifies the client connection settings for the proxy server + // to use when communicating with the shoot apiserver. + ShootClientConnection *ShootClientConnection + // Controllers defines the configuration of the controllers. 
+ Controllers *GardenletControllerConfiguration + // Resources defines the total capacity for seed resources and the amount reserved for use by Gardener. + Resources *ResourcesConfiguration + // LeaderElection defines the configuration of leader election client. + LeaderElection *LeaderElectionConfiguration + // LogLevel is the level/severity for the logs. Must be one of [info,debug,error]. + LogLevel *string + // KubernetesLogLevel is the log level used for Kubernetes' k8s.io/klog functions. + KubernetesLogLevel *klog.Level + // Server defines the configuration of the HTTP server. + Server *ServerConfiguration + // FeatureGates is a map of feature names to bools that enable or disable alpha/experimental + // features. This field modifies piecemeal the built-in default values from + // "github.com/gardener/gardener/pkg/gardenlet/features/features.go". + // Default: nil + FeatureGates map[string]bool + // SeedConfig contains configuration for the seed cluster. Must not be set if seed selector is set. + // In this case the gardenlet creates the `Seed` object itself based on the provided config. + SeedConfig *SeedConfig + // SeedSelector contains an optional list of labels on `Seed` resources that shall be managed by + // this gardenlet instance. In this case the `Seed` object is not managed by the Gardenlet and must + // be created by an operator/administrator. + SeedSelector *metav1.LabelSelector + // Logging contains an optional configurations for the logging stack deployed + // by the Gardenlet in the seed clusters. + Logging *Logging + // SNI contains an optional configuration for the APIServerSNI feature used + // by the Gardenlet in the seed clusters. + SNI *SNI +} + +// GardenClientConnection specifies the kubeconfig file and the client connection settings +// for the proxy server to use when communicating with the garden apiserver. +type GardenClientConnection struct { + componentbaseconfig.ClientConnectionConfiguration + // GardenClusterAddress is the external address that the gardenlets can use to remotely connect to the Garden + // cluster. It is needed in case the gardenlet deploys itself into shooted seeds. + GardenClusterAddress *string + // GardenClusterCACert is the external address that the gardenlets can use to remotely connect to the Garden + // cluster. It is needed in case the gardenlet deploys itself into shooted seeds. + GardenClusterCACert []byte + // BootstrapKubeconfig is a reference to a secret that contains a data key 'kubeconfig' whose value + // is a kubeconfig that can be used for bootstrapping. If `kubeconfig` is given then only this kubeconfig + // will be considered. + BootstrapKubeconfig *corev1.SecretReference + // KubeconfigSecret is the reference to a secret object that stores the gardenlet's kubeconfig that + // it uses to communicate with the garden cluster. If `kubeconfig` is given then only this kubeconfig + // will be considered. + KubeconfigSecret *corev1.SecretReference +} + +// SeedClientConnection specifies the client connection settings +// for the proxy server to use when communicating with the seed apiserver. +type SeedClientConnection struct { + componentbaseconfig.ClientConnectionConfiguration +} + +// ShootClientConnection specifies the client connection settings +// for the proxy server to use when communicating with the shoot apiserver. +type ShootClientConnection struct { + componentbaseconfig.ClientConnectionConfiguration +} + +// GardenletControllerConfiguration defines the configuration of the controllers. 
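Of the three connection sections, only GardenClientConnection carries bootstrap-specific fields. A sketch of how they relate, assuming placeholder secret coordinates (these names are examples, not defaults defined in this package):

    package example

    import (
        corev1 "k8s.io/api/core/v1"

        "github.com/gardener/gardener/pkg/gardenlet/apis/config"
    )

    // gardenConnection points the gardenlet at a bootstrap kubeconfig and at the
    // secret in which it stores the kubeconfig it obtains for the garden cluster.
    var gardenConnection = config.GardenClientConnection{
        BootstrapKubeconfig: &corev1.SecretReference{
            Namespace: "garden",
            Name:      "gardenlet-bootstrap-kubeconfig",
        },
        KubeconfigSecret: &corev1.SecretReference{
            Namespace: "garden",
            Name:      "gardenlet-kubeconfig",
        },
    }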
+type GardenletControllerConfiguration struct { + // BackupBucket defines the configuration of the BackupBucket controller. + BackupBucket *BackupBucketControllerConfiguration + // BackupEntry defines the configuration of the BackupEntry controller. + BackupEntry *BackupEntryControllerConfiguration + // ControllerInstallation defines the configuration of the ControllerInstallation controller. + ControllerInstallation *ControllerInstallationControllerConfiguration + // ControllerInstallationCare defines the configuration of the ControllerInstallationCare controller. + ControllerInstallationCare *ControllerInstallationCareControllerConfiguration + // ControllerInstallationRequired defines the configuration of the ControllerInstallationRequired controller. + ControllerInstallationRequired *ControllerInstallationRequiredControllerConfiguration + // Seed defines the configuration of the Seed controller. + Seed *SeedControllerConfiguration + // Shoot defines the configuration of the Shoot controller. + Shoot *ShootControllerConfiguration + // ShootCare defines the configuration of the ShootCare controller. + ShootCare *ShootCareControllerConfiguration + // ShootStateSync defines the configuration of the ShootState controller. + ShootStateSync *ShootStateSyncControllerConfiguration + // ShootedSeedRegistration the configuration of the shooted seed registration controller. + ShootedSeedRegistration *ShootedSeedRegistrationControllerConfiguration + // SeedAPIServerNetworkPolicy defines the configuration of the SeedAPIServerNetworkPolicy controller. + SeedAPIServerNetworkPolicy *SeedAPIServerNetworkPolicyControllerConfiguration +} + +// BackupBucketControllerConfiguration defines the configuration of the BackupBucket +// controller. +type BackupBucketControllerConfiguration struct { + // ConcurrentSyncs is the number of workers used for the controller to work on events. + ConcurrentSyncs *int +} + +// BackupEntryControllerConfiguration defines the configuration of the BackupEntry +// controller. +type BackupEntryControllerConfiguration struct { + // ConcurrentSyncs is the number of workers used for the controller to work on events. + ConcurrentSyncs *int + // DeletionGracePeriodHours holds the period in number of hours to delete the BackupEntry after deletion timestamp is set. + // If value is set to 0 then the BackupEntryController will trigger deletion immediately. + DeletionGracePeriodHours *int +} + +// ControllerInstallationControllerConfiguration defines the configuration of the +// ControllerInstallation controller. +type ControllerInstallationControllerConfiguration struct { + // ConcurrentSyncs is the number of workers used for the controller to work on + // events. + ConcurrentSyncs *int +} + +// ControllerInstallationCareControllerConfiguration defines the configuration of the ControllerInstallationCare +// controller. +type ControllerInstallationCareControllerConfiguration struct { + // ConcurrentSyncs is the number of workers used for the controller to work on + // events. + ConcurrentSyncs *int + // SyncPeriod is the duration how often the existing resources are reconciled (how + // often the health check of ControllerInstallations is performed. + SyncPeriod *metav1.Duration +} + +// ControllerInstallationRequiredControllerConfiguration defines the configuration of the ControllerInstallationRequired +// controller. +type ControllerInstallationRequiredControllerConfiguration struct { + // ConcurrentSyncs is the number of workers used for the controller to work on + // events. 
+ ConcurrentSyncs *int +} + +// SeedControllerConfiguration defines the configuration of the Seed controller. +type SeedControllerConfiguration struct { + // ConcurrentSyncs is the number of workers used for the controller to work on + // events. + ConcurrentSyncs *int + // SyncPeriod is the duration how often the existing resources are reconciled. + SyncPeriod *metav1.Duration +} + +// ShootControllerConfiguration defines the configuration of the Shoot +// controller. +type ShootControllerConfiguration struct { + // ConcurrentSyncs is the number of workers used for the controller to work on + // events. + ConcurrentSyncs *int + // ProgressReportPeriod is the period how often the progress of a shoot operation will be reported in the + // Shoot's `.status.lastOperation` field. By default, the progress will be reported immediately after a task of the + // respective flow has been completed. If you set this to a value > 0 (e.g., 5s) then it will be only reported every + // 5 seconds. Any tasks that were completed in the meantime will not be reported. + ProgressReportPeriod *metav1.Duration + // ReconcileInMaintenanceOnly determines whether Shoot reconciliations happen only + // during its maintenance time window. + ReconcileInMaintenanceOnly *bool + // RespectSyncPeriodOverwrite determines whether a sync period overwrite of a + // Shoot (via annotation) is respected or not. Defaults to false. + RespectSyncPeriodOverwrite *bool + // RetryDuration is the maximum duration how often a reconciliation will be retried + // in case of errors. + RetryDuration *metav1.Duration + // SyncPeriod is the duration how often the existing resources are reconciled. + SyncPeriod *metav1.Duration + // DNSEntryTTLSeconds is the TTL in seconds that is being used for DNS entries when reconciling shoots. + // Default: 120s + DNSEntryTTLSeconds *int64 +} + +// ShootCareControllerConfiguration defines the configuration of the ShootCare +// controller. +type ShootCareControllerConfiguration struct { + // ConcurrentSyncs is the number of workers used for the controller to work on + // events. + ConcurrentSyncs *int + // SyncPeriod is the duration how often the existing resources are reconciled (how + // often the health check of Shoot clusters is performed (only if no operation is + // already running on them). + SyncPeriod *metav1.Duration + // StaleExtensionHealthChecks defines the configuration of the check for stale extension health checks. + StaleExtensionHealthChecks *StaleExtensionHealthChecks + // ConditionThresholds defines the condition threshold per condition type. + ConditionThresholds []ConditionThreshold +} + +// StaleExtensionHealthChecks defines the configuration of the check for stale extension health checks. +type StaleExtensionHealthChecks struct { + // Enabled specifies whether the check for stale extensions health checks is enabled. + // Defaults to true. + Enabled bool + // Threshold configures the threshold when gardenlet considers a health check report of an extension CRD as outdated. + // The threshold should have some leeway in case a Gardener extension is temporarily unavailable. + // Defaults to 5m. + Threshold *metav1.Duration +} + +// ShootedSeedRegistrationControllerConfiguration defines the configuration of the shooted seed registration controller. +type ShootedSeedRegistrationControllerConfiguration struct { + // SyncJitterPeriod is a jitter duration for the reconciler sync that can be used to distribute the syncs randomly. 
+ // If its value is greater than 0 then the shooted seeds will not be enqueued immediately but only after a random + // duration between 0 and the configured value. It is defaulted to 5m. + SyncJitterPeriod *metav1.Duration +} + +// ConditionThreshold defines the duration how long a flappy condition stays in progressing state. +type ConditionThreshold struct { + // Type is the type of the condition to define the threshold for. + Type string + // Duration is the duration how long the condition can stay in the progressing state. + Duration metav1.Duration +} + +// ShootStateSyncControllerConfiguration defines the configuration of the ShootState Sync controller. +type ShootStateSyncControllerConfiguration struct { + // ConcurrentSyncs is the number of workers used for the controller to work on + // events. + ConcurrentSyncs *int + // SyncPeriod is the duration how often the existing extension resources are synced to the ShootState resource + SyncPeriod *metav1.Duration +} + +// SeedAPIServerNetworkPolicyControllerConfiguration defines the configuration of the SeedAPIServerNetworkPolicy +// controller. +type SeedAPIServerNetworkPolicyControllerConfiguration struct { + // ConcurrentSyncs is the number of workers used for the controller to work on events. + ConcurrentSyncs *int +} + +// ResourcesConfiguration defines the total capacity for seed resources and the amount reserved for use by Gardener. +type ResourcesConfiguration struct { + // Capacity defines the total resources of a seed. + Capacity corev1.ResourceList + // Reserved defines the resources of a seed that are reserved for use by Gardener. + // Defaults to 0. + Reserved corev1.ResourceList +} + +// LeaderElectionConfiguration defines the configuration of leader election +// clients for components that can run with leader election enabled. +type LeaderElectionConfiguration struct { + componentbaseconfig.LeaderElectionConfiguration + // LockObjectNamespace defines the namespace of the lock object. + LockObjectNamespace *string + // LockObjectName defines the lock object name. + LockObjectName *string +} + +// SeedConfig contains configuration for the seed cluster. +type SeedConfig struct { + gardencore.SeedTemplate +} + +// FluentBit contains configuration for Fluent Bit. +type FluentBit struct { + // ServiceSection defines [SERVICE] configuration for the fluent-bit. + // If it is nil, fluent-bit uses default service configuration. + ServiceSection *string + // InputSection defines [INPUT] configuration for the fluent-bit. + // If it is nil, fluent-bit uses default input configuration. + InputSection *string + // OutputSection defines [OUTPUT] configuration for the fluent-bit. + // If it is nil, fluent-bit uses default output configuration. + OutputSection *string +} + +// Logging contains configuration for the logging stack. +type Logging struct { + // FluentBit contains configurations for the fluent-bit + FluentBit *FluentBit +} + +// ServerConfiguration contains details for the HTTP(S) servers. +type ServerConfiguration struct { + // HTTPS is the configuration for the HTTPS server. + HTTPS HTTPSServer +} + +// Server contains information for HTTP(S) server configuration. +type Server struct { + // BindAddress is the IP address on which to listen for the specified port. + BindAddress string + // Port is the port on which to serve unsecured, unauthenticated access. + Port int +} + +// HTTPSServer is the configuration for the HTTPSServer server. 
+type HTTPSServer struct { + // Server is the configuration for the bind address and the port. + Server + // TLSServer contains information about the TLS configuration for a HTTPS server. If empty then a proper server + // certificate will be self-generated during startup. + TLS *TLSServer +} + +// TLSServer contains information about the TLS configuration for a HTTPS server. +type TLSServer struct { + // ServerCertPath is the path to the server certificate file. + ServerCertPath string + // ServerKeyPath is the path to the private key file. + ServerKeyPath string +} + +// SNI contains an optional configuration for the APIServerSNI feature used +// by the Gardenlet in the seed clusters. +type SNI struct { + // Ingress is the ingressgateway configuration. + Ingress *SNIIngress +} + +// SNIIngress contains configuration of the ingressgateway. +type SNIIngress struct { + // ServiceName is the name of the ingressgateway Service. + // Defaults to "istio-ingressgateway". + ServiceName *string + // Namespace is the namespace in which the ingressgateway is deployed in. + // Defaults to "istio-ingress". + Namespace *string + // Labels of the ingressgateway + // Defaults to "istio: ingressgateway". + Labels map[string]string +} diff --git a/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/v1alpha1/defaults.go b/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/v1alpha1/defaults.go new file mode 100644 index 000000000..2109ebfb9 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/v1alpha1/defaults.go @@ -0,0 +1,323 @@ +// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/leaderelection/resourcelock" + componentbaseconfigv1alpha1 "k8s.io/component-base/config/v1alpha1" + "k8s.io/utils/pointer" +) + +func addDefaultingFuncs(scheme *runtime.Scheme) error { + return RegisterDefaults(scheme) +} + +// SetDefaults_GardenletConfiguration sets defaults for the configuration of the Gardenlet. 
+func SetDefaults_GardenletConfiguration(obj *GardenletConfiguration) { + if obj.GardenClientConnection == nil { + obj.GardenClientConnection = &GardenClientConnection{} + } + + if obj.SeedClientConnection == nil { + obj.SeedClientConnection = &SeedClientConnection{} + } + + if obj.ShootClientConnection == nil { + obj.ShootClientConnection = &ShootClientConnection{} + } + + if obj.Controllers == nil { + obj.Controllers = &GardenletControllerConfiguration{} + } + if obj.Controllers.BackupBucket == nil { + obj.Controllers.BackupBucket = &BackupBucketControllerConfiguration{} + } + if obj.Controllers.BackupEntry == nil { + obj.Controllers.BackupEntry = &BackupEntryControllerConfiguration{} + } + if obj.Controllers.ControllerInstallation == nil { + obj.Controllers.ControllerInstallation = &ControllerInstallationControllerConfiguration{} + } + if obj.Controllers.ControllerInstallationCare == nil { + obj.Controllers.ControllerInstallationCare = &ControllerInstallationCareControllerConfiguration{} + } + if obj.Controllers.ControllerInstallationRequired == nil { + obj.Controllers.ControllerInstallationRequired = &ControllerInstallationRequiredControllerConfiguration{} + } + if obj.Controllers.Seed == nil { + obj.Controllers.Seed = &SeedControllerConfiguration{} + } + if obj.Controllers.Shoot == nil { + obj.Controllers.Shoot = &ShootControllerConfiguration{} + } + if obj.Controllers.ShootCare == nil { + obj.Controllers.ShootCare = &ShootCareControllerConfiguration{} + } + if obj.Controllers.ShootStateSync == nil { + obj.Controllers.ShootStateSync = &ShootStateSyncControllerConfiguration{} + } + if obj.Controllers.ShootedSeedRegistration == nil { + obj.Controllers.ShootedSeedRegistration = &ShootedSeedRegistrationControllerConfiguration{} + } + + if obj.Controllers.SeedAPIServerNetworkPolicy == nil { + obj.Controllers.SeedAPIServerNetworkPolicy = &SeedAPIServerNetworkPolicyControllerConfiguration{} + } + + if obj.LeaderElection == nil { + obj.LeaderElection = &LeaderElectionConfiguration{} + } + + if obj.LogLevel == nil { + v := DefaultLogLevel + obj.LogLevel = &v + } + + if obj.KubernetesLogLevel == nil { + v := DefaultKubernetesLogLevel + obj.KubernetesLogLevel = &v + } + + if obj.Server == nil { + obj.Server = &ServerConfiguration{} + } + if len(obj.Server.HTTPS.BindAddress) == 0 { + obj.Server.HTTPS.BindAddress = "0.0.0.0" + } + if obj.Server.HTTPS.Port == 0 { + obj.Server.HTTPS.Port = 2720 + } + + if obj.SNI == nil { + obj.SNI = &SNI{} + } +} + +// SetDefaults_ClientConnectionConfiguration sets defaults for the client connection objects. +func SetDefaults_ClientConnectionConfiguration(obj *componentbaseconfigv1alpha1.ClientConnectionConfiguration) { + if obj.QPS == 0.0 { + obj.QPS = 50.0 + } + if obj.Burst == 0 { + obj.Burst = 100 + } +} + +// SetDefaults_LeaderElectionConfiguration sets defaults for the leader election of the gardenlet. +func SetDefaults_LeaderElectionConfiguration(obj *LeaderElectionConfiguration) { + if obj.ResourceLock == "" { + // TODO: change default to leases after a few releases + // make sure, we had configmapsleases as default for a few releases before migrating to leases to ensure, + // all users had at least one version running with the hybrid lock to avoid split-brain scenarios when migrating. 
+ obj.ResourceLock = resourcelock.ConfigMapsLeasesResourceLock + } + + componentbaseconfigv1alpha1.RecommendedDefaultLeaderElectionConfiguration(&obj.LeaderElectionConfiguration) + + if obj.LockObjectNamespace == nil { + v := GardenletDefaultLockObjectNamespace + obj.LockObjectNamespace = &v + } + if obj.LockObjectName == nil { + v := GardenletDefaultLockObjectName + obj.LockObjectName = &v + } +} + +// SetDefaults_BackupBucketControllerConfiguration sets defaults for the backup bucket controller. +func SetDefaults_BackupBucketControllerConfiguration(obj *BackupBucketControllerConfiguration) { + if obj.ConcurrentSyncs == nil { + v := DefaultControllerConcurrentSyncs + obj.ConcurrentSyncs = &v + } +} + +// SetDefaults_BackupEntryControllerConfiguration sets defaults for the backup entry controller. +func SetDefaults_BackupEntryControllerConfiguration(obj *BackupEntryControllerConfiguration) { + if obj.ConcurrentSyncs == nil { + v := DefaultControllerConcurrentSyncs + obj.ConcurrentSyncs = &v + } + + if obj.DeletionGracePeriodHours == nil || *obj.DeletionGracePeriodHours < 0 { + v := DefaultBackupEntryDeletionGracePeriodHours + obj.DeletionGracePeriodHours = &v + } +} + +// SetDefaults_ControllerInstallationControllerConfiguration sets defaults for the controller installation controller. +func SetDefaults_ControllerInstallationControllerConfiguration(obj *ControllerInstallationControllerConfiguration) { + if obj.ConcurrentSyncs == nil { + v := DefaultControllerConcurrentSyncs + obj.ConcurrentSyncs = &v + } +} + +// SetDefaults_ControllerInstallationCareControllerConfiguration sets defaults for the controller installation care controller. +func SetDefaults_ControllerInstallationCareControllerConfiguration(obj *ControllerInstallationCareControllerConfiguration) { + if obj.ConcurrentSyncs == nil { + v := DefaultControllerConcurrentSyncs + obj.ConcurrentSyncs = &v + } + + if obj.SyncPeriod == nil { + v := metav1.Duration{Duration: 30 * time.Second} + obj.SyncPeriod = &v + } +} + +// SetDefaults_ControllerInstallationRequiredControllerConfiguration sets defaults for the ControllerInstallationRequired controller. +func SetDefaults_ControllerInstallationRequiredControllerConfiguration(obj *ControllerInstallationRequiredControllerConfiguration) { + if obj.ConcurrentSyncs == nil { + // The controller actually starts one controller per extension resource per Seed. + // For one seed that is already 1 * 10 extension resources = 10 workers. + v := 1 + obj.ConcurrentSyncs = &v + } +} + +// SetDefaults_SeedControllerConfiguration sets defaults for the seed controller. +func SetDefaults_SeedControllerConfiguration(obj *SeedControllerConfiguration) { + if obj.ConcurrentSyncs == nil { + v := DefaultControllerConcurrentSyncs + obj.ConcurrentSyncs = &v + } + + if obj.SyncPeriod == nil { + v := DefaultControllerSyncPeriod + obj.SyncPeriod = &v + } +} + +// SetDefaults_ShootControllerConfiguration sets defaults for the shoot controller. 
+func SetDefaults_ShootControllerConfiguration(obj *ShootControllerConfiguration) { + if obj.ConcurrentSyncs == nil { + v := DefaultControllerConcurrentSyncs + obj.ConcurrentSyncs = &v + } + + if obj.SyncPeriod == nil { + v := metav1.Duration{Duration: time.Hour} + obj.SyncPeriod = &v + } + + if obj.RespectSyncPeriodOverwrite == nil { + falseVar := false + obj.RespectSyncPeriodOverwrite = &falseVar + } + + if obj.ReconcileInMaintenanceOnly == nil { + falseVar := false + obj.ReconcileInMaintenanceOnly = &falseVar + } + + if obj.RetryDuration == nil { + v := metav1.Duration{Duration: 12 * time.Hour} + obj.RetryDuration = &v + } + + if obj.DNSEntryTTLSeconds == nil { + obj.DNSEntryTTLSeconds = pointer.Int64Ptr(120) + } +} + +// SetDefaults_ShootCareControllerConfiguration sets defaults for the shoot care controller. +func SetDefaults_ShootCareControllerConfiguration(obj *ShootCareControllerConfiguration) { + if obj.ConcurrentSyncs == nil { + v := DefaultControllerConcurrentSyncs + obj.ConcurrentSyncs = &v + } + + if obj.SyncPeriod == nil { + v := metav1.Duration{Duration: time.Minute} + obj.SyncPeriod = &v + } + + if obj.StaleExtensionHealthChecks == nil { + v := StaleExtensionHealthChecks{Enabled: true} + obj.StaleExtensionHealthChecks = &v + } +} + +// SetDefaults_StaleExtensionHealthChecks sets defaults for the stale extension health checks. +func SetDefaults_StaleExtensionHealthChecks(obj *StaleExtensionHealthChecks) { + if obj.Threshold == nil { + v := metav1.Duration{Duration: 5 * time.Minute} + obj.Threshold = &v + } +} + +// SetDefaults_ShootStateSyncControllerConfiguration sets defaults for the shoot state controller. +func SetDefaults_ShootStateSyncControllerConfiguration(obj *ShootStateSyncControllerConfiguration) { + if obj.ConcurrentSyncs == nil { + // The controller actually starts one controller per extension resource per Seed. + // For one seed that is already 1 * 10 extension resources = 10 workers. + v := 1 + obj.ConcurrentSyncs = &v + } + + if obj.SyncPeriod == nil { + v := metav1.Duration{Duration: time.Minute} + obj.SyncPeriod = &v + } +} + +// SetDefaults_ShootedSeedRegistrationControllerConfiguration sets defaults for the shooted seed registration controller. +func SetDefaults_ShootedSeedRegistrationControllerConfiguration(obj *ShootedSeedRegistrationControllerConfiguration) { + if obj.SyncJitterPeriod == nil { + v := metav1.Duration{Duration: 5 * time.Minute} + obj.SyncJitterPeriod = &v + } +} + +// SetDefaults_SeedAPIServerNetworkPolicyControllerConfiguration sets defaults for the seed apiserver endpoints controller. +func SetDefaults_SeedAPIServerNetworkPolicyControllerConfiguration(obj *SeedAPIServerNetworkPolicyControllerConfiguration) { + if obj.ConcurrentSyncs == nil { + // only use few workers for each seed, as the API server endpoints should stay the same most of the time. + v := 3 + obj.ConcurrentSyncs = &v + } +} + +// SetDefaults_SNI sets defaults for SNI. +func SetDefaults_SNI(obj *SNI) { + if obj.Ingress == nil { + obj.Ingress = &SNIIngress{} + } +} + +// SetDefaults_SNIIngress sets defaults for SNI ingressgateway. 
+func SetDefaults_SNIIngress(obj *SNIIngress) { + var ( + defaultNS = DefaultSNIIngresNamespace + defaultSVCName = DefaultSNIIngresServiceName + ) + + if obj.Namespace == nil { + obj.Namespace = &defaultNS + } + + if obj.ServiceName == nil { + obj.ServiceName = &defaultSVCName + } + + if obj.Labels == nil { + obj.Labels = map[string]string{"istio": "ingressgateway"} + } +} diff --git a/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/v1alpha1/doc.go b/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/v1alpha1/doc.go new file mode 100644 index 000000000..fa831411a --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/v1alpha1/doc.go @@ -0,0 +1,20 @@ +// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +k8s:deepcopy-gen=package +// +k8s:conversion-gen=github.com/gardener/gardener/pkg/gardenlet/apis/config +// +k8s:openapi-gen=true +// +k8s:defaulter-gen=TypeMeta + +package v1alpha1 // import "github.com/gardener/gardener/pkg/gardenlet/apis/config/v1alpha1" diff --git a/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/v1alpha1/register.go b/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/v1alpha1/register.go new file mode 100644 index 000000000..e526ecf16 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/v1alpha1/register.go @@ -0,0 +1,54 @@ +// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "gardenlet.config.gardener.cloud" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder used to register the Shoot resource. 
+ SchemeBuilder runtime.SchemeBuilder + localSchemeBuilder = &SchemeBuilder + // AddToScheme is a pointer to SchemeBuilder.AddToScheme. + AddToScheme = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register(addDefaultingFuncs, addKnownTypes) +} + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &GardenletConfiguration{}, + ) + return nil +} diff --git a/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/v1alpha1/types.go b/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/v1alpha1/types.go new file mode 100644 index 000000000..33e23de2e --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/v1alpha1/types.go @@ -0,0 +1,472 @@ +// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + "time" + + gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + componentbaseconfigv1alpha1 "k8s.io/component-base/config/v1alpha1" + "k8s.io/klog" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// GardenletConfiguration defines the configuration for the Gardenlet. +type GardenletConfiguration struct { + metav1.TypeMeta `json:",inline"` + // GardenClientConnection specifies the kubeconfig file and the client connection settings + // for the proxy server to use when communicating with the garden apiserver. + // +optional + GardenClientConnection *GardenClientConnection `json:"gardenClientConnection,omitempty"` + // SeedClientConnection specifies the client connection settings for the proxy server + // to use when communicating with the seed apiserver. + // +optional + SeedClientConnection *SeedClientConnection `json:"seedClientConnection,omitempty"` + // ShootClientConnection specifies the client connection settings for the proxy server + // to use when communicating with the shoot apiserver. + // +optional + ShootClientConnection *ShootClientConnection `json:"shootClientConnection,omitempty"` + // Controllers defines the configuration of the controllers. + // +optional + Controllers *GardenletControllerConfiguration `json:"controllers,omitempty"` + // Resources defines the total capacity for seed resources and the amount reserved for use by Gardener. + // +optional + Resources *ResourcesConfiguration `json:"resources,omitempty"` + // LeaderElection defines the configuration of leader election client. 
+	// +optional
+	LeaderElection *LeaderElectionConfiguration `json:"leaderElection,omitempty"`
+	// LogLevel is the level/severity for the logs. Must be one of [info,debug,error].
+	// +optional
+	LogLevel *string `json:"logLevel,omitempty"`
+	// KubernetesLogLevel is the log level used for Kubernetes' k8s.io/klog functions.
+	// +optional
+	KubernetesLogLevel *klog.Level `json:"kubernetesLogLevel,omitempty"`
+	// Server defines the configuration of the HTTP server.
+	// +optional
+	Server *ServerConfiguration `json:"server,omitempty"`
+	// FeatureGates is a map of feature names to bools that enable or disable alpha/experimental
+	// features. This field modifies piecemeal the built-in default values from
+	// "github.com/gardener/gardener/pkg/gardenlet/features/features.go".
+	// Default: nil
+	// +optional
+	FeatureGates map[string]bool `json:"featureGates,omitempty"`
+	// SeedConfig contains configuration for the seed cluster. Must not be set if seed selector is set.
+	// In this case the gardenlet creates the `Seed` object itself based on the provided config.
+	// +optional
+	SeedConfig *SeedConfig `json:"seedConfig,omitempty"`
+	// SeedSelector contains an optional list of labels on `Seed` resources that shall be managed by
+	// this gardenlet instance. In this case the `Seed` object is not managed by the Gardenlet and must
+	// be created by an operator/administrator.
+	// +optional
+	SeedSelector *metav1.LabelSelector `json:"seedSelector,omitempty"`
+	// Logging contains an optional configuration for the logging stack deployed
+	// by the Gardenlet in the seed clusters.
+	// +optional
+	Logging *Logging `json:"logging,omitempty"`
+	// SNI contains an optional configuration for the APIServerSNI feature used
+	// by the Gardenlet in the seed clusters.
+	// +optional
+	SNI *SNI `json:"sni,omitempty"`
+}
+
+// GardenClientConnection specifies the kubeconfig file and the client connection settings
+// for the proxy server to use when communicating with the garden apiserver.
+type GardenClientConnection struct {
+	componentbaseconfigv1alpha1.ClientConnectionConfiguration `json:",inline"`
+	// GardenClusterAddress is the external address that the gardenlets can use to remotely connect to the Garden
+	// cluster. It is needed in case the gardenlet deploys itself into shooted seeds.
+	// +optional
+	GardenClusterAddress *string `json:"gardenClusterAddress,omitempty"`
+	// GardenClusterCACert is the CA certificate of the Garden cluster that the gardenlets use when remotely
+	// connecting to it. It is needed in case the gardenlet deploys itself into shooted seeds.
+	// +optional
+	GardenClusterCACert []byte `json:"gardenClusterCACert,omitempty"`
+	// BootstrapKubeconfig is a reference to a secret that contains a data key 'kubeconfig' whose value
+	// is a kubeconfig that can be used for bootstrapping. If `kubeconfig` is given then only this kubeconfig
+	// will be considered.
+	// +optional
+	BootstrapKubeconfig *corev1.SecretReference `json:"bootstrapKubeconfig,omitempty"`
+	// KubeconfigSecret is the reference to a secret object that stores the gardenlet's kubeconfig that
+	// it uses to communicate with the garden cluster. If `kubeconfig` is given then only this kubeconfig
+	// will be considered.
+	// +optional
+	KubeconfigSecret *corev1.SecretReference `json:"kubeconfigSecret,omitempty"`
+}
+
+// SeedClientConnection specifies the client connection settings
+// for the proxy server to use when communicating with the seed apiserver.
+type SeedClientConnection struct {
+	componentbaseconfigv1alpha1.ClientConnectionConfiguration `json:",inline"`
+}
+
+// ShootClientConnection specifies the client connection settings
+// for the proxy server to use when communicating with the shoot apiserver.
+type ShootClientConnection struct {
+	componentbaseconfigv1alpha1.ClientConnectionConfiguration `json:",inline"`
+}
+
+// GardenletControllerConfiguration defines the configuration of the controllers.
+type GardenletControllerConfiguration struct {
+	// BackupBucket defines the configuration of the BackupBucket controller.
+	// +optional
+	BackupBucket *BackupBucketControllerConfiguration `json:"backupBucket"`
+	// BackupEntry defines the configuration of the BackupEntry controller.
+	// +optional
+	BackupEntry *BackupEntryControllerConfiguration `json:"backupEntry"`
+	// ControllerInstallation defines the configuration of the ControllerInstallation controller.
+	// +optional
+	ControllerInstallation *ControllerInstallationControllerConfiguration `json:"controllerInstallation,omitempty"`
+	// ControllerInstallationCare defines the configuration of the ControllerInstallationCare controller.
+	// +optional
+	ControllerInstallationCare *ControllerInstallationCareControllerConfiguration `json:"controllerInstallationCare,omitempty"`
+	// ControllerInstallationRequired defines the configuration of the ControllerInstallationRequired controller.
+	// +optional
+	ControllerInstallationRequired *ControllerInstallationRequiredControllerConfiguration `json:"controllerInstallationRequired,omitempty"`
+	// Seed defines the configuration of the Seed controller.
+	// +optional
+	Seed *SeedControllerConfiguration `json:"seed,omitempty"`
+	// Shoot defines the configuration of the Shoot controller.
+	// +optional
+	Shoot *ShootControllerConfiguration `json:"shoot,omitempty"`
+	// ShootCare defines the configuration of the ShootCare controller.
+	// +optional
+	ShootCare *ShootCareControllerConfiguration `json:"shootCare,omitempty"`
+	// ShootStateSync defines the configuration of the ShootState controller.
+	// +optional
+	ShootStateSync *ShootStateSyncControllerConfiguration `json:"shootStateSync,omitempty"`
+	// ShootedSeedRegistration defines the configuration of the shooted seed registration controller.
+	// +optional
+	ShootedSeedRegistration *ShootedSeedRegistrationControllerConfiguration `json:"shootedSeedRegistration,omitempty"`
+	// SeedAPIServerNetworkPolicy defines the configuration of the SeedAPIServerNetworkPolicy controller.
+	// +optional
+	SeedAPIServerNetworkPolicy *SeedAPIServerNetworkPolicyControllerConfiguration `json:"seedAPIServerNetworkPolicy,omitempty"`
+}
+
+// BackupBucketControllerConfiguration defines the configuration of the BackupBucket
+// controller.
+type BackupBucketControllerConfiguration struct {
+	// ConcurrentSyncs is the number of workers used for the controller to work on events.
+	// +optional
+	ConcurrentSyncs *int `json:"concurrentSyncs,omitempty"`
+}
+
+// BackupEntryControllerConfiguration defines the configuration of the BackupEntry
+// controller.
+type BackupEntryControllerConfiguration struct {
+	// ConcurrentSyncs is the number of workers used for the controller to work on events.
+	// +optional
+	ConcurrentSyncs *int `json:"concurrentSyncs,omitempty"`
+	// DeletionGracePeriodHours holds the period in number of hours to delete the BackupEntry after deletion timestamp is set.
+	// If value is set to 0 then the BackupEntryController will trigger deletion immediately.
+ // +optional + DeletionGracePeriodHours *int `json:"deletionGracePeriodHours,omitempty"` +} + +// ControllerInstallationControllerConfiguration defines the configuration of the +// ControllerInstallation controller. +type ControllerInstallationControllerConfiguration struct { + // ConcurrentSyncs is the number of workers used for the controller to work on + // events. + // +optional + ConcurrentSyncs *int `json:"concurrentSyncs,omitempty"` +} + +// ControllerInstallationCareControllerConfiguration defines the configuration of the ControllerInstallationCare +// controller. +type ControllerInstallationCareControllerConfiguration struct { + // ConcurrentSyncs is the number of workers used for the controller to work on + // events. + // +optional + ConcurrentSyncs *int `json:"concurrentSyncs,omitempty"` + // SyncPeriod is the duration how often the existing resources are reconciled (how + // often the health check of ControllerInstallations is performed. + // +optional + SyncPeriod *metav1.Duration `json:"syncPeriod,omitempty"` +} + +// ControllerInstallationRequiredControllerConfiguration defines the configuration of the ControllerInstallationRequired +// controller. +type ControllerInstallationRequiredControllerConfiguration struct { + // ConcurrentSyncs is the number of workers used for the controller to work on + // events. + // +optional + ConcurrentSyncs *int `json:"concurrentSyncs,omitempty"` +} + +// SeedControllerConfiguration defines the configuration of the Seed controller. +type SeedControllerConfiguration struct { + // ConcurrentSyncs is the number of workers used for the controller to work on + // events. + // +optional + ConcurrentSyncs *int `json:"concurrentSyncs,omitempty"` + // SyncPeriod is the duration how often the existing resources are reconciled. + // +optional + SyncPeriod *metav1.Duration `json:"syncPeriod,omitempty"` +} + +// ShootControllerConfiguration defines the configuration of the Shoot +// controller. +type ShootControllerConfiguration struct { + // ConcurrentSyncs is the number of workers used for the controller to work on + // events. + // +optional + ConcurrentSyncs *int `json:"concurrentSyncs,omitempty"` + // ProgressReportPeriod is the period how often the progress of a shoot operation will be reported in the + // Shoot's `.status.lastOperation` field. By default, the progress will be reported immediately after a task of the + // respective flow has been completed. If you set this to a value > 0 (e.g., 5s) then it will be only reported every + // 5 seconds. Any tasks that were completed in the meantime will not be reported. + // +optional + ProgressReportPeriod *metav1.Duration `json:"progressReportPeriod,omitempty"` + // ReconcileInMaintenanceOnly determines whether Shoot reconciliations happen only + // during its maintenance time window. + // +optional + ReconcileInMaintenanceOnly *bool `json:"reconcileInMaintenanceOnly,omitempty"` + // RespectSyncPeriodOverwrite determines whether a sync period overwrite of a + // Shoot (via annotation) is respected or not. Defaults to false. + // +optional + RespectSyncPeriodOverwrite *bool `json:"respectSyncPeriodOverwrite,omitempty"` + // RetryDuration is the maximum duration how often a reconciliation will be retried + // in case of errors. + // +optional + RetryDuration *metav1.Duration `json:"retryDuration,omitempty"` + // SyncPeriod is the duration how often the existing resources are reconciled. 
+ // +optional + SyncPeriod *metav1.Duration `json:"syncPeriod,omitempty"` + // DNSEntryTTLSeconds is the TTL in seconds that is being used for DNS entries when reconciling shoots. + // Default: 120s + // +optional + DNSEntryTTLSeconds *int64 `json:"dnsEntryTTLSeconds,omitempty"` +} + +// ShootCareControllerConfiguration defines the configuration of the ShootCare +// controller. +type ShootCareControllerConfiguration struct { + // ConcurrentSyncs is the number of workers used for the controller to work on + // events. + // +optional + ConcurrentSyncs *int `json:"concurrentSyncs,omitempty"` + // SyncPeriod is the duration how often the existing resources are reconciled (how + // often the health check of Shoot clusters is performed (only if no operation is + // already running on them). + // +optional + SyncPeriod *metav1.Duration `json:"syncPeriod,omitempty"` + // StaleExtensionHealthChecks defines the configuration of the check for stale extension health checks. + // +optional + StaleExtensionHealthChecks *StaleExtensionHealthChecks `json:"staleExtensionHealthChecks,omitempty"` + // ConditionThresholds defines the condition threshold per condition type. + // +optional + ConditionThresholds []ConditionThreshold `json:"conditionThresholds,omitempty"` +} + +// StaleExtensionHealthChecks defines the configuration of the check for stale extension health checks. +type StaleExtensionHealthChecks struct { + // Enabled specifies whether the check for stale extensions health checks is enabled. + // Defaults to true. + Enabled bool `json:"enabled"` + // Threshold configures the threshold when gardenlet considers a health check report of an extension CRD as outdated. + // The threshold should have some leeway in case a Gardener extension is temporarily unavailable. + // Defaults to 5m. + // +optional + Threshold *metav1.Duration `json:"threshold,omitempty"` +} + +// ShootedSeedRegistrationControllerConfiguration defines the configuration of the shooted seed registration controller. +type ShootedSeedRegistrationControllerConfiguration struct { + // SyncJitterPeriod is a jitter duration for the reconciler sync that can be used to distribute the syncs randomly. + // If its value is greater than 0 then the shooted seeds will not be enqueued immediately but only after a random + // duration between 0 and the configured value. It is defaulted to 5m. + // +optional + SyncJitterPeriod *metav1.Duration `json:"syncJitterPeriod,omitempty"` +} + +// ConditionThreshold defines the duration how long a flappy condition stays in progressing state. +type ConditionThreshold struct { + // Type is the type of the condition to define the threshold for. + Type string `json:"type"` + // Duration is the duration how long the condition can stay in the progressing state. + Duration metav1.Duration `json:"duration"` +} + +// ShootStateSyncControllerConfiguration defines the configuration of the ShootState Sync controller. +type ShootStateSyncControllerConfiguration struct { + // ConcurrentSyncs is the number of workers used for the controller to work on + // events. + // +optional + ConcurrentSyncs *int `json:"concurrentSyncs,omitempty"` + // SyncPeriod is the duration how often the existing extension resources are synced to the ShootState resource + // +optional + SyncPeriod *metav1.Duration `json:"syncPeriod,omitempty"` +} + +// SeedAPIServerNetworkPolicyControllerConfiguration defines the configuration of the SeedAPIServerNetworkPolicy +// controller. 
+type SeedAPIServerNetworkPolicyControllerConfiguration struct { + // ConcurrentSyncs is the number of workers used for the controller to work on events. + // +optional + ConcurrentSyncs *int `json:"concurrentSyncs,omitempty"` +} + +// ResourcesConfiguration defines the total capacity for seed resources and the amount reserved for use by Gardener. +type ResourcesConfiguration struct { + // Capacity defines the total resources of a seed. + // +optional + Capacity corev1.ResourceList `json:"capacity,omitempty"` + // Reserved defines the resources of a seed that are reserved for use by Gardener. + // Defaults to 0. + // +optional + Reserved corev1.ResourceList `json:"reserved,omitempty"` +} + +// LeaderElectionConfiguration defines the configuration of leader election +// clients for components that can run with leader election enabled. +type LeaderElectionConfiguration struct { + componentbaseconfigv1alpha1.LeaderElectionConfiguration `json:",inline"` + // LockObjectNamespace defines the namespace of the lock object. + // +optional + LockObjectNamespace *string `json:"lockObjectNamespace,omitempty"` + // LockObjectName defines the lock object name. + // +optional + LockObjectName *string `json:"lockObjectName,omitempty"` +} + +// SeedConfig contains configuration for the seed cluster. +type SeedConfig struct { + gardencorev1beta1.SeedTemplate `json:",inline"` +} + +// FluentBit contains configuration for Fluent Bit. +type FluentBit struct { + // ServiceSection defines [SERVICE] configuration for the fluent-bit. + // If it is nil, fluent-bit uses default service configuration. + // +optional + ServiceSection *string `json:"service,omitempty" yaml:"service,omitempty"` + // InputSection defines [INPUT] configuration for the fluent-bit. + // If it is nil, fluent-bit uses default input configuration. + // +optional + InputSection *string `json:"input,omitempty" yaml:"input,omitempty"` + // OutputSection defines [OUTPUT] configuration for the fluent-bit. + // If it is nil, fluent-bit uses default output configuration. + // +optional + OutputSection *string `json:"output,omitempty" yaml:"output,omitempty"` +} + +// Logging contains configuration for the logging stack. +type Logging struct { + // FluentBit contains configurations for the fluent-bit + // +optional + FluentBit *FluentBit `json:"fluentBit,omitempty" yaml:"fluentBit,omitempty"` +} + +// ServerConfiguration contains details for the HTTP(S) servers. +type ServerConfiguration struct { + // HTTPS is the configuration for the HTTPS server. + HTTPS HTTPSServer `json:"https"` +} + +// Server contains information for HTTP(S) server configuration. +type Server struct { + // BindAddress is the IP address on which to listen for the specified port. + BindAddress string `json:"bindAddress"` + // Port is the port on which to serve unsecured, unauthenticated access. + Port int `json:"port"` +} + +// HTTPSServer is the configuration for the HTTPSServer server. +type HTTPSServer struct { + // Server is the configuration for the bind address and the port. + Server `json:",inline"` + // TLSServer contains information about the TLS configuration for a HTTPS server. If empty then a proper server + // certificate will be self-generated during startup. + // +optional + TLS *TLSServer `json:"tls,omitempty"` +} + +// TLSServer contains information about the TLS configuration for a HTTPS server. +type TLSServer struct { + // ServerCertPath is the path to the server certificate file. 
+ ServerCertPath string `json:"serverCertPath"` + // ServerKeyPath is the path to the private key file. + ServerKeyPath string `json:"serverKeyPath"` +} + +// SNI contains an optional configuration for the APIServerSNI feature used +// by the Gardenlet in the seed clusters. +type SNI struct { + // Ingress is the ingressgateway configuration. + // +optional + Ingress *SNIIngress `json:"ingress,omitempty"` +} + +// SNIIngress contains configuration of the ingressgateway. +type SNIIngress struct { + // ServiceName is the name of the ingressgateway Service. + // Defaults to "istio-ingressgateway". + // +optional + ServiceName *string `json:"serviceName,omitempty"` + // Namespace is the namespace in which the ingressgateway is deployed in. + // Defaults to "istio-ingress". + // +optional + Namespace *string `json:"namespace,omitempty"` + // Labels of the ingressgateway + // Defaults to "istio: ingressgateway". + // +optional + Labels map[string]string `json:"labels,omitempty"` +} + +const ( + // GardenletDefaultLockObjectNamespace is the default lock namespace for leader election. + GardenletDefaultLockObjectNamespace = "garden" + + // GardenletDefaultLockObjectName is the default lock name for leader election. + GardenletDefaultLockObjectName = "gardenlet-leader-election" + + // DefaultBackupEntryDeletionGracePeriodHours is a constant for the default number of hours the Backup Entry should be kept after shoot is deleted. + // By default we set this to 0 so that then BackupEntryController will trigger deletion immediately. + DefaultBackupEntryDeletionGracePeriodHours = 0 + + // DefaultDiscoveryDirName is the name of the default directory used for discovering Kubernetes APIs. + DefaultDiscoveryDirName = "gardenlet-discovery" + + // DefaultDiscoveryCacheDirName is the name of the default directory used for the discovery cache. + DefaultDiscoveryCacheDirName = "cache" + + // DefaultDiscoveryHTTPCacheDirName is the name of the default directory used for the discovery HTTP cache. + DefaultDiscoveryHTTPCacheDirName = "http-cache" + + // DefaultDiscoveryTTL is the default ttl for the cached discovery client. + DefaultDiscoveryTTL = 10 * time.Second + + // DefaultLogLevel is the default log level. + DefaultLogLevel = "info" + + // DefaultKubernetesLogLevel is the default Kubernetes log level. + DefaultKubernetesLogLevel klog.Level = 0 + + // DefaultControllerConcurrentSyncs is a default value for concurrent syncs for controllers. + DefaultControllerConcurrentSyncs = 20 + + // DefaultSNIIngresNamespace is the default sni ingress namespace. + DefaultSNIIngresNamespace = "istio-ingress" + + // DefaultSNIIngresServiceName is the default sni ingress service name. + DefaultSNIIngresServiceName = "istio-ingressgateway" +) + +// DefaultControllerSyncPeriod is a default value for sync period for controllers. +var DefaultControllerSyncPeriod = metav1.Duration{Duration: time.Minute} diff --git a/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/v1alpha1/zz_generated.conversion.go b/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/v1alpha1/zz_generated.conversion.go new file mode 100644 index 000000000..6995db099 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/v1alpha1/zz_generated.conversion.go @@ -0,0 +1,1127 @@ +// +build !ignore_autogenerated + +/* +Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 
2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by conversion-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + unsafe "unsafe" + + config "github.com/gardener/gardener/pkg/gardenlet/apis/config" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + configv1alpha1 "k8s.io/component-base/config/v1alpha1" + klog "k8s.io/klog" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. +func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*BackupBucketControllerConfiguration)(nil), (*config.BackupBucketControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_BackupBucketControllerConfiguration_To_config_BackupBucketControllerConfiguration(a.(*BackupBucketControllerConfiguration), b.(*config.BackupBucketControllerConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.BackupBucketControllerConfiguration)(nil), (*BackupBucketControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_BackupBucketControllerConfiguration_To_v1alpha1_BackupBucketControllerConfiguration(a.(*config.BackupBucketControllerConfiguration), b.(*BackupBucketControllerConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*BackupEntryControllerConfiguration)(nil), (*config.BackupEntryControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_BackupEntryControllerConfiguration_To_config_BackupEntryControllerConfiguration(a.(*BackupEntryControllerConfiguration), b.(*config.BackupEntryControllerConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.BackupEntryControllerConfiguration)(nil), (*BackupEntryControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_BackupEntryControllerConfiguration_To_v1alpha1_BackupEntryControllerConfiguration(a.(*config.BackupEntryControllerConfiguration), b.(*BackupEntryControllerConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ConditionThreshold)(nil), (*config.ConditionThreshold)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_ConditionThreshold_To_config_ConditionThreshold(a.(*ConditionThreshold), b.(*config.ConditionThreshold), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.ConditionThreshold)(nil), (*ConditionThreshold)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_config_ConditionThreshold_To_v1alpha1_ConditionThreshold(a.(*config.ConditionThreshold), b.(*ConditionThreshold), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ControllerInstallationCareControllerConfiguration)(nil), (*config.ControllerInstallationCareControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_ControllerInstallationCareControllerConfiguration_To_config_ControllerInstallationCareControllerConfiguration(a.(*ControllerInstallationCareControllerConfiguration), b.(*config.ControllerInstallationCareControllerConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.ControllerInstallationCareControllerConfiguration)(nil), (*ControllerInstallationCareControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_ControllerInstallationCareControllerConfiguration_To_v1alpha1_ControllerInstallationCareControllerConfiguration(a.(*config.ControllerInstallationCareControllerConfiguration), b.(*ControllerInstallationCareControllerConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ControllerInstallationControllerConfiguration)(nil), (*config.ControllerInstallationControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_ControllerInstallationControllerConfiguration_To_config_ControllerInstallationControllerConfiguration(a.(*ControllerInstallationControllerConfiguration), b.(*config.ControllerInstallationControllerConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.ControllerInstallationControllerConfiguration)(nil), (*ControllerInstallationControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_ControllerInstallationControllerConfiguration_To_v1alpha1_ControllerInstallationControllerConfiguration(a.(*config.ControllerInstallationControllerConfiguration), b.(*ControllerInstallationControllerConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ControllerInstallationRequiredControllerConfiguration)(nil), (*config.ControllerInstallationRequiredControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_ControllerInstallationRequiredControllerConfiguration_To_config_ControllerInstallationRequiredControllerConfiguration(a.(*ControllerInstallationRequiredControllerConfiguration), b.(*config.ControllerInstallationRequiredControllerConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.ControllerInstallationRequiredControllerConfiguration)(nil), (*ControllerInstallationRequiredControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_ControllerInstallationRequiredControllerConfiguration_To_v1alpha1_ControllerInstallationRequiredControllerConfiguration(a.(*config.ControllerInstallationRequiredControllerConfiguration), b.(*ControllerInstallationRequiredControllerConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*FluentBit)(nil), (*config.FluentBit)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_FluentBit_To_config_FluentBit(a.(*FluentBit), b.(*config.FluentBit), scope) + }); err != nil { + return err 
+ } + if err := s.AddGeneratedConversionFunc((*config.FluentBit)(nil), (*FluentBit)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_FluentBit_To_v1alpha1_FluentBit(a.(*config.FluentBit), b.(*FluentBit), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*GardenClientConnection)(nil), (*config.GardenClientConnection)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_GardenClientConnection_To_config_GardenClientConnection(a.(*GardenClientConnection), b.(*config.GardenClientConnection), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.GardenClientConnection)(nil), (*GardenClientConnection)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_GardenClientConnection_To_v1alpha1_GardenClientConnection(a.(*config.GardenClientConnection), b.(*GardenClientConnection), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*GardenletConfiguration)(nil), (*config.GardenletConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_GardenletConfiguration_To_config_GardenletConfiguration(a.(*GardenletConfiguration), b.(*config.GardenletConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.GardenletConfiguration)(nil), (*GardenletConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_GardenletConfiguration_To_v1alpha1_GardenletConfiguration(a.(*config.GardenletConfiguration), b.(*GardenletConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*GardenletControllerConfiguration)(nil), (*config.GardenletControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_GardenletControllerConfiguration_To_config_GardenletControllerConfiguration(a.(*GardenletControllerConfiguration), b.(*config.GardenletControllerConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.GardenletControllerConfiguration)(nil), (*GardenletControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_GardenletControllerConfiguration_To_v1alpha1_GardenletControllerConfiguration(a.(*config.GardenletControllerConfiguration), b.(*GardenletControllerConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*HTTPSServer)(nil), (*config.HTTPSServer)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_HTTPSServer_To_config_HTTPSServer(a.(*HTTPSServer), b.(*config.HTTPSServer), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.HTTPSServer)(nil), (*HTTPSServer)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_HTTPSServer_To_v1alpha1_HTTPSServer(a.(*config.HTTPSServer), b.(*HTTPSServer), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*LeaderElectionConfiguration)(nil), (*config.LeaderElectionConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_LeaderElectionConfiguration_To_config_LeaderElectionConfiguration(a.(*LeaderElectionConfiguration), b.(*config.LeaderElectionConfiguration), scope) + }); err != nil { + return err + } + if err := 
s.AddGeneratedConversionFunc((*config.LeaderElectionConfiguration)(nil), (*LeaderElectionConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_LeaderElectionConfiguration_To_v1alpha1_LeaderElectionConfiguration(a.(*config.LeaderElectionConfiguration), b.(*LeaderElectionConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*Logging)(nil), (*config.Logging)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_Logging_To_config_Logging(a.(*Logging), b.(*config.Logging), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.Logging)(nil), (*Logging)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_Logging_To_v1alpha1_Logging(a.(*config.Logging), b.(*Logging), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ResourcesConfiguration)(nil), (*config.ResourcesConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_ResourcesConfiguration_To_config_ResourcesConfiguration(a.(*ResourcesConfiguration), b.(*config.ResourcesConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.ResourcesConfiguration)(nil), (*ResourcesConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_ResourcesConfiguration_To_v1alpha1_ResourcesConfiguration(a.(*config.ResourcesConfiguration), b.(*ResourcesConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*SNI)(nil), (*config.SNI)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_SNI_To_config_SNI(a.(*SNI), b.(*config.SNI), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.SNI)(nil), (*SNI)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_SNI_To_v1alpha1_SNI(a.(*config.SNI), b.(*SNI), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*SNIIngress)(nil), (*config.SNIIngress)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_SNIIngress_To_config_SNIIngress(a.(*SNIIngress), b.(*config.SNIIngress), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.SNIIngress)(nil), (*SNIIngress)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_SNIIngress_To_v1alpha1_SNIIngress(a.(*config.SNIIngress), b.(*SNIIngress), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*SeedAPIServerNetworkPolicyControllerConfiguration)(nil), (*config.SeedAPIServerNetworkPolicyControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_SeedAPIServerNetworkPolicyControllerConfiguration_To_config_SeedAPIServerNetworkPolicyControllerConfiguration(a.(*SeedAPIServerNetworkPolicyControllerConfiguration), b.(*config.SeedAPIServerNetworkPolicyControllerConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.SeedAPIServerNetworkPolicyControllerConfiguration)(nil), (*SeedAPIServerNetworkPolicyControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_config_SeedAPIServerNetworkPolicyControllerConfiguration_To_v1alpha1_SeedAPIServerNetworkPolicyControllerConfiguration(a.(*config.SeedAPIServerNetworkPolicyControllerConfiguration), b.(*SeedAPIServerNetworkPolicyControllerConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*SeedClientConnection)(nil), (*config.SeedClientConnection)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_SeedClientConnection_To_config_SeedClientConnection(a.(*SeedClientConnection), b.(*config.SeedClientConnection), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.SeedClientConnection)(nil), (*SeedClientConnection)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_SeedClientConnection_To_v1alpha1_SeedClientConnection(a.(*config.SeedClientConnection), b.(*SeedClientConnection), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*SeedConfig)(nil), (*config.SeedConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_SeedConfig_To_config_SeedConfig(a.(*SeedConfig), b.(*config.SeedConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.SeedConfig)(nil), (*SeedConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_SeedConfig_To_v1alpha1_SeedConfig(a.(*config.SeedConfig), b.(*SeedConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*SeedControllerConfiguration)(nil), (*config.SeedControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_SeedControllerConfiguration_To_config_SeedControllerConfiguration(a.(*SeedControllerConfiguration), b.(*config.SeedControllerConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.SeedControllerConfiguration)(nil), (*SeedControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_SeedControllerConfiguration_To_v1alpha1_SeedControllerConfiguration(a.(*config.SeedControllerConfiguration), b.(*SeedControllerConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*Server)(nil), (*config.Server)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_Server_To_config_Server(a.(*Server), b.(*config.Server), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.Server)(nil), (*Server)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_Server_To_v1alpha1_Server(a.(*config.Server), b.(*Server), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ServerConfiguration)(nil), (*config.ServerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_ServerConfiguration_To_config_ServerConfiguration(a.(*ServerConfiguration), b.(*config.ServerConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.ServerConfiguration)(nil), (*ServerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_ServerConfiguration_To_v1alpha1_ServerConfiguration(a.(*config.ServerConfiguration), b.(*ServerConfiguration), scope) + }); err != nil { + return err + } + if err := 
s.AddGeneratedConversionFunc((*ShootCareControllerConfiguration)(nil), (*config.ShootCareControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_ShootCareControllerConfiguration_To_config_ShootCareControllerConfiguration(a.(*ShootCareControllerConfiguration), b.(*config.ShootCareControllerConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.ShootCareControllerConfiguration)(nil), (*ShootCareControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_ShootCareControllerConfiguration_To_v1alpha1_ShootCareControllerConfiguration(a.(*config.ShootCareControllerConfiguration), b.(*ShootCareControllerConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ShootClientConnection)(nil), (*config.ShootClientConnection)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_ShootClientConnection_To_config_ShootClientConnection(a.(*ShootClientConnection), b.(*config.ShootClientConnection), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.ShootClientConnection)(nil), (*ShootClientConnection)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_ShootClientConnection_To_v1alpha1_ShootClientConnection(a.(*config.ShootClientConnection), b.(*ShootClientConnection), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ShootControllerConfiguration)(nil), (*config.ShootControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_ShootControllerConfiguration_To_config_ShootControllerConfiguration(a.(*ShootControllerConfiguration), b.(*config.ShootControllerConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.ShootControllerConfiguration)(nil), (*ShootControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_ShootControllerConfiguration_To_v1alpha1_ShootControllerConfiguration(a.(*config.ShootControllerConfiguration), b.(*ShootControllerConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ShootStateSyncControllerConfiguration)(nil), (*config.ShootStateSyncControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_ShootStateSyncControllerConfiguration_To_config_ShootStateSyncControllerConfiguration(a.(*ShootStateSyncControllerConfiguration), b.(*config.ShootStateSyncControllerConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.ShootStateSyncControllerConfiguration)(nil), (*ShootStateSyncControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_ShootStateSyncControllerConfiguration_To_v1alpha1_ShootStateSyncControllerConfiguration(a.(*config.ShootStateSyncControllerConfiguration), b.(*ShootStateSyncControllerConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ShootedSeedRegistrationControllerConfiguration)(nil), (*config.ShootedSeedRegistrationControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_v1alpha1_ShootedSeedRegistrationControllerConfiguration_To_config_ShootedSeedRegistrationControllerConfiguration(a.(*ShootedSeedRegistrationControllerConfiguration), b.(*config.ShootedSeedRegistrationControllerConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.ShootedSeedRegistrationControllerConfiguration)(nil), (*ShootedSeedRegistrationControllerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_ShootedSeedRegistrationControllerConfiguration_To_v1alpha1_ShootedSeedRegistrationControllerConfiguration(a.(*config.ShootedSeedRegistrationControllerConfiguration), b.(*ShootedSeedRegistrationControllerConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*StaleExtensionHealthChecks)(nil), (*config.StaleExtensionHealthChecks)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_StaleExtensionHealthChecks_To_config_StaleExtensionHealthChecks(a.(*StaleExtensionHealthChecks), b.(*config.StaleExtensionHealthChecks), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.StaleExtensionHealthChecks)(nil), (*StaleExtensionHealthChecks)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_StaleExtensionHealthChecks_To_v1alpha1_StaleExtensionHealthChecks(a.(*config.StaleExtensionHealthChecks), b.(*StaleExtensionHealthChecks), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*TLSServer)(nil), (*config.TLSServer)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_TLSServer_To_config_TLSServer(a.(*TLSServer), b.(*config.TLSServer), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*config.TLSServer)(nil), (*TLSServer)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_config_TLSServer_To_v1alpha1_TLSServer(a.(*config.TLSServer), b.(*TLSServer), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_v1alpha1_BackupBucketControllerConfiguration_To_config_BackupBucketControllerConfiguration(in *BackupBucketControllerConfiguration, out *config.BackupBucketControllerConfiguration, s conversion.Scope) error { + out.ConcurrentSyncs = (*int)(unsafe.Pointer(in.ConcurrentSyncs)) + return nil +} + +// Convert_v1alpha1_BackupBucketControllerConfiguration_To_config_BackupBucketControllerConfiguration is an autogenerated conversion function. +func Convert_v1alpha1_BackupBucketControllerConfiguration_To_config_BackupBucketControllerConfiguration(in *BackupBucketControllerConfiguration, out *config.BackupBucketControllerConfiguration, s conversion.Scope) error { + return autoConvert_v1alpha1_BackupBucketControllerConfiguration_To_config_BackupBucketControllerConfiguration(in, out, s) +} + +func autoConvert_config_BackupBucketControllerConfiguration_To_v1alpha1_BackupBucketControllerConfiguration(in *config.BackupBucketControllerConfiguration, out *BackupBucketControllerConfiguration, s conversion.Scope) error { + out.ConcurrentSyncs = (*int)(unsafe.Pointer(in.ConcurrentSyncs)) + return nil +} + +// Convert_config_BackupBucketControllerConfiguration_To_v1alpha1_BackupBucketControllerConfiguration is an autogenerated conversion function. 
+func Convert_config_BackupBucketControllerConfiguration_To_v1alpha1_BackupBucketControllerConfiguration(in *config.BackupBucketControllerConfiguration, out *BackupBucketControllerConfiguration, s conversion.Scope) error { + return autoConvert_config_BackupBucketControllerConfiguration_To_v1alpha1_BackupBucketControllerConfiguration(in, out, s) +} + +func autoConvert_v1alpha1_BackupEntryControllerConfiguration_To_config_BackupEntryControllerConfiguration(in *BackupEntryControllerConfiguration, out *config.BackupEntryControllerConfiguration, s conversion.Scope) error { + out.ConcurrentSyncs = (*int)(unsafe.Pointer(in.ConcurrentSyncs)) + out.DeletionGracePeriodHours = (*int)(unsafe.Pointer(in.DeletionGracePeriodHours)) + return nil +} + +// Convert_v1alpha1_BackupEntryControllerConfiguration_To_config_BackupEntryControllerConfiguration is an autogenerated conversion function. +func Convert_v1alpha1_BackupEntryControllerConfiguration_To_config_BackupEntryControllerConfiguration(in *BackupEntryControllerConfiguration, out *config.BackupEntryControllerConfiguration, s conversion.Scope) error { + return autoConvert_v1alpha1_BackupEntryControllerConfiguration_To_config_BackupEntryControllerConfiguration(in, out, s) +} + +func autoConvert_config_BackupEntryControllerConfiguration_To_v1alpha1_BackupEntryControllerConfiguration(in *config.BackupEntryControllerConfiguration, out *BackupEntryControllerConfiguration, s conversion.Scope) error { + out.ConcurrentSyncs = (*int)(unsafe.Pointer(in.ConcurrentSyncs)) + out.DeletionGracePeriodHours = (*int)(unsafe.Pointer(in.DeletionGracePeriodHours)) + return nil +} + +// Convert_config_BackupEntryControllerConfiguration_To_v1alpha1_BackupEntryControllerConfiguration is an autogenerated conversion function. +func Convert_config_BackupEntryControllerConfiguration_To_v1alpha1_BackupEntryControllerConfiguration(in *config.BackupEntryControllerConfiguration, out *BackupEntryControllerConfiguration, s conversion.Scope) error { + return autoConvert_config_BackupEntryControllerConfiguration_To_v1alpha1_BackupEntryControllerConfiguration(in, out, s) +} + +func autoConvert_v1alpha1_ConditionThreshold_To_config_ConditionThreshold(in *ConditionThreshold, out *config.ConditionThreshold, s conversion.Scope) error { + out.Type = in.Type + out.Duration = in.Duration + return nil +} + +// Convert_v1alpha1_ConditionThreshold_To_config_ConditionThreshold is an autogenerated conversion function. +func Convert_v1alpha1_ConditionThreshold_To_config_ConditionThreshold(in *ConditionThreshold, out *config.ConditionThreshold, s conversion.Scope) error { + return autoConvert_v1alpha1_ConditionThreshold_To_config_ConditionThreshold(in, out, s) +} + +func autoConvert_config_ConditionThreshold_To_v1alpha1_ConditionThreshold(in *config.ConditionThreshold, out *ConditionThreshold, s conversion.Scope) error { + out.Type = in.Type + out.Duration = in.Duration + return nil +} + +// Convert_config_ConditionThreshold_To_v1alpha1_ConditionThreshold is an autogenerated conversion function. 
+func Convert_config_ConditionThreshold_To_v1alpha1_ConditionThreshold(in *config.ConditionThreshold, out *ConditionThreshold, s conversion.Scope) error { + return autoConvert_config_ConditionThreshold_To_v1alpha1_ConditionThreshold(in, out, s) +} + +func autoConvert_v1alpha1_ControllerInstallationCareControllerConfiguration_To_config_ControllerInstallationCareControllerConfiguration(in *ControllerInstallationCareControllerConfiguration, out *config.ControllerInstallationCareControllerConfiguration, s conversion.Scope) error { + out.ConcurrentSyncs = (*int)(unsafe.Pointer(in.ConcurrentSyncs)) + out.SyncPeriod = (*v1.Duration)(unsafe.Pointer(in.SyncPeriod)) + return nil +} + +// Convert_v1alpha1_ControllerInstallationCareControllerConfiguration_To_config_ControllerInstallationCareControllerConfiguration is an autogenerated conversion function. +func Convert_v1alpha1_ControllerInstallationCareControllerConfiguration_To_config_ControllerInstallationCareControllerConfiguration(in *ControllerInstallationCareControllerConfiguration, out *config.ControllerInstallationCareControllerConfiguration, s conversion.Scope) error { + return autoConvert_v1alpha1_ControllerInstallationCareControllerConfiguration_To_config_ControllerInstallationCareControllerConfiguration(in, out, s) +} + +func autoConvert_config_ControllerInstallationCareControllerConfiguration_To_v1alpha1_ControllerInstallationCareControllerConfiguration(in *config.ControllerInstallationCareControllerConfiguration, out *ControllerInstallationCareControllerConfiguration, s conversion.Scope) error { + out.ConcurrentSyncs = (*int)(unsafe.Pointer(in.ConcurrentSyncs)) + out.SyncPeriod = (*v1.Duration)(unsafe.Pointer(in.SyncPeriod)) + return nil +} + +// Convert_config_ControllerInstallationCareControllerConfiguration_To_v1alpha1_ControllerInstallationCareControllerConfiguration is an autogenerated conversion function. +func Convert_config_ControllerInstallationCareControllerConfiguration_To_v1alpha1_ControllerInstallationCareControllerConfiguration(in *config.ControllerInstallationCareControllerConfiguration, out *ControllerInstallationCareControllerConfiguration, s conversion.Scope) error { + return autoConvert_config_ControllerInstallationCareControllerConfiguration_To_v1alpha1_ControllerInstallationCareControllerConfiguration(in, out, s) +} + +func autoConvert_v1alpha1_ControllerInstallationControllerConfiguration_To_config_ControllerInstallationControllerConfiguration(in *ControllerInstallationControllerConfiguration, out *config.ControllerInstallationControllerConfiguration, s conversion.Scope) error { + out.ConcurrentSyncs = (*int)(unsafe.Pointer(in.ConcurrentSyncs)) + return nil +} + +// Convert_v1alpha1_ControllerInstallationControllerConfiguration_To_config_ControllerInstallationControllerConfiguration is an autogenerated conversion function. 
+func Convert_v1alpha1_ControllerInstallationControllerConfiguration_To_config_ControllerInstallationControllerConfiguration(in *ControllerInstallationControllerConfiguration, out *config.ControllerInstallationControllerConfiguration, s conversion.Scope) error { + return autoConvert_v1alpha1_ControllerInstallationControllerConfiguration_To_config_ControllerInstallationControllerConfiguration(in, out, s) +} + +func autoConvert_config_ControllerInstallationControllerConfiguration_To_v1alpha1_ControllerInstallationControllerConfiguration(in *config.ControllerInstallationControllerConfiguration, out *ControllerInstallationControllerConfiguration, s conversion.Scope) error { + out.ConcurrentSyncs = (*int)(unsafe.Pointer(in.ConcurrentSyncs)) + return nil +} + +// Convert_config_ControllerInstallationControllerConfiguration_To_v1alpha1_ControllerInstallationControllerConfiguration is an autogenerated conversion function. +func Convert_config_ControllerInstallationControllerConfiguration_To_v1alpha1_ControllerInstallationControllerConfiguration(in *config.ControllerInstallationControllerConfiguration, out *ControllerInstallationControllerConfiguration, s conversion.Scope) error { + return autoConvert_config_ControllerInstallationControllerConfiguration_To_v1alpha1_ControllerInstallationControllerConfiguration(in, out, s) +} + +func autoConvert_v1alpha1_ControllerInstallationRequiredControllerConfiguration_To_config_ControllerInstallationRequiredControllerConfiguration(in *ControllerInstallationRequiredControllerConfiguration, out *config.ControllerInstallationRequiredControllerConfiguration, s conversion.Scope) error { + out.ConcurrentSyncs = (*int)(unsafe.Pointer(in.ConcurrentSyncs)) + return nil +} + +// Convert_v1alpha1_ControllerInstallationRequiredControllerConfiguration_To_config_ControllerInstallationRequiredControllerConfiguration is an autogenerated conversion function. +func Convert_v1alpha1_ControllerInstallationRequiredControllerConfiguration_To_config_ControllerInstallationRequiredControllerConfiguration(in *ControllerInstallationRequiredControllerConfiguration, out *config.ControllerInstallationRequiredControllerConfiguration, s conversion.Scope) error { + return autoConvert_v1alpha1_ControllerInstallationRequiredControllerConfiguration_To_config_ControllerInstallationRequiredControllerConfiguration(in, out, s) +} + +func autoConvert_config_ControllerInstallationRequiredControllerConfiguration_To_v1alpha1_ControllerInstallationRequiredControllerConfiguration(in *config.ControllerInstallationRequiredControllerConfiguration, out *ControllerInstallationRequiredControllerConfiguration, s conversion.Scope) error { + out.ConcurrentSyncs = (*int)(unsafe.Pointer(in.ConcurrentSyncs)) + return nil +} + +// Convert_config_ControllerInstallationRequiredControllerConfiguration_To_v1alpha1_ControllerInstallationRequiredControllerConfiguration is an autogenerated conversion function. 
+func Convert_config_ControllerInstallationRequiredControllerConfiguration_To_v1alpha1_ControllerInstallationRequiredControllerConfiguration(in *config.ControllerInstallationRequiredControllerConfiguration, out *ControllerInstallationRequiredControllerConfiguration, s conversion.Scope) error { + return autoConvert_config_ControllerInstallationRequiredControllerConfiguration_To_v1alpha1_ControllerInstallationRequiredControllerConfiguration(in, out, s) +} + +func autoConvert_v1alpha1_FluentBit_To_config_FluentBit(in *FluentBit, out *config.FluentBit, s conversion.Scope) error { + out.ServiceSection = (*string)(unsafe.Pointer(in.ServiceSection)) + out.InputSection = (*string)(unsafe.Pointer(in.InputSection)) + out.OutputSection = (*string)(unsafe.Pointer(in.OutputSection)) + return nil +} + +// Convert_v1alpha1_FluentBit_To_config_FluentBit is an autogenerated conversion function. +func Convert_v1alpha1_FluentBit_To_config_FluentBit(in *FluentBit, out *config.FluentBit, s conversion.Scope) error { + return autoConvert_v1alpha1_FluentBit_To_config_FluentBit(in, out, s) +} + +func autoConvert_config_FluentBit_To_v1alpha1_FluentBit(in *config.FluentBit, out *FluentBit, s conversion.Scope) error { + out.ServiceSection = (*string)(unsafe.Pointer(in.ServiceSection)) + out.InputSection = (*string)(unsafe.Pointer(in.InputSection)) + out.OutputSection = (*string)(unsafe.Pointer(in.OutputSection)) + return nil +} + +// Convert_config_FluentBit_To_v1alpha1_FluentBit is an autogenerated conversion function. +func Convert_config_FluentBit_To_v1alpha1_FluentBit(in *config.FluentBit, out *FluentBit, s conversion.Scope) error { + return autoConvert_config_FluentBit_To_v1alpha1_FluentBit(in, out, s) +} + +func autoConvert_v1alpha1_GardenClientConnection_To_config_GardenClientConnection(in *GardenClientConnection, out *config.GardenClientConnection, s conversion.Scope) error { + if err := configv1alpha1.Convert_v1alpha1_ClientConnectionConfiguration_To_config_ClientConnectionConfiguration(&in.ClientConnectionConfiguration, &out.ClientConnectionConfiguration, s); err != nil { + return err + } + out.GardenClusterAddress = (*string)(unsafe.Pointer(in.GardenClusterAddress)) + out.GardenClusterCACert = *(*[]byte)(unsafe.Pointer(&in.GardenClusterCACert)) + out.BootstrapKubeconfig = (*corev1.SecretReference)(unsafe.Pointer(in.BootstrapKubeconfig)) + out.KubeconfigSecret = (*corev1.SecretReference)(unsafe.Pointer(in.KubeconfigSecret)) + return nil +} + +// Convert_v1alpha1_GardenClientConnection_To_config_GardenClientConnection is an autogenerated conversion function. 
+func Convert_v1alpha1_GardenClientConnection_To_config_GardenClientConnection(in *GardenClientConnection, out *config.GardenClientConnection, s conversion.Scope) error { + return autoConvert_v1alpha1_GardenClientConnection_To_config_GardenClientConnection(in, out, s) +} + +func autoConvert_config_GardenClientConnection_To_v1alpha1_GardenClientConnection(in *config.GardenClientConnection, out *GardenClientConnection, s conversion.Scope) error { + if err := configv1alpha1.Convert_config_ClientConnectionConfiguration_To_v1alpha1_ClientConnectionConfiguration(&in.ClientConnectionConfiguration, &out.ClientConnectionConfiguration, s); err != nil { + return err + } + out.GardenClusterAddress = (*string)(unsafe.Pointer(in.GardenClusterAddress)) + out.GardenClusterCACert = *(*[]byte)(unsafe.Pointer(&in.GardenClusterCACert)) + out.BootstrapKubeconfig = (*corev1.SecretReference)(unsafe.Pointer(in.BootstrapKubeconfig)) + out.KubeconfigSecret = (*corev1.SecretReference)(unsafe.Pointer(in.KubeconfigSecret)) + return nil +} + +// Convert_config_GardenClientConnection_To_v1alpha1_GardenClientConnection is an autogenerated conversion function. +func Convert_config_GardenClientConnection_To_v1alpha1_GardenClientConnection(in *config.GardenClientConnection, out *GardenClientConnection, s conversion.Scope) error { + return autoConvert_config_GardenClientConnection_To_v1alpha1_GardenClientConnection(in, out, s) +} + +func autoConvert_v1alpha1_GardenletConfiguration_To_config_GardenletConfiguration(in *GardenletConfiguration, out *config.GardenletConfiguration, s conversion.Scope) error { + if in.GardenClientConnection != nil { + in, out := &in.GardenClientConnection, &out.GardenClientConnection + *out = new(config.GardenClientConnection) + if err := Convert_v1alpha1_GardenClientConnection_To_config_GardenClientConnection(*in, *out, s); err != nil { + return err + } + } else { + out.GardenClientConnection = nil + } + if in.SeedClientConnection != nil { + in, out := &in.SeedClientConnection, &out.SeedClientConnection + *out = new(config.SeedClientConnection) + if err := Convert_v1alpha1_SeedClientConnection_To_config_SeedClientConnection(*in, *out, s); err != nil { + return err + } + } else { + out.SeedClientConnection = nil + } + if in.ShootClientConnection != nil { + in, out := &in.ShootClientConnection, &out.ShootClientConnection + *out = new(config.ShootClientConnection) + if err := Convert_v1alpha1_ShootClientConnection_To_config_ShootClientConnection(*in, *out, s); err != nil { + return err + } + } else { + out.ShootClientConnection = nil + } + out.Controllers = (*config.GardenletControllerConfiguration)(unsafe.Pointer(in.Controllers)) + out.Resources = (*config.ResourcesConfiguration)(unsafe.Pointer(in.Resources)) + if in.LeaderElection != nil { + in, out := &in.LeaderElection, &out.LeaderElection + *out = new(config.LeaderElectionConfiguration) + if err := Convert_v1alpha1_LeaderElectionConfiguration_To_config_LeaderElectionConfiguration(*in, *out, s); err != nil { + return err + } + } else { + out.LeaderElection = nil + } + out.LogLevel = (*string)(unsafe.Pointer(in.LogLevel)) + out.KubernetesLogLevel = (*klog.Level)(unsafe.Pointer(in.KubernetesLogLevel)) + out.Server = (*config.ServerConfiguration)(unsafe.Pointer(in.Server)) + out.FeatureGates = *(*map[string]bool)(unsafe.Pointer(&in.FeatureGates)) + if in.SeedConfig != nil { + in, out := &in.SeedConfig, &out.SeedConfig + *out = new(config.SeedConfig) + if err := Convert_v1alpha1_SeedConfig_To_config_SeedConfig(*in, *out, s); err != nil { + return err 
+ } + } else { + out.SeedConfig = nil + } + out.SeedSelector = (*v1.LabelSelector)(unsafe.Pointer(in.SeedSelector)) + out.Logging = (*config.Logging)(unsafe.Pointer(in.Logging)) + out.SNI = (*config.SNI)(unsafe.Pointer(in.SNI)) + return nil +} + +// Convert_v1alpha1_GardenletConfiguration_To_config_GardenletConfiguration is an autogenerated conversion function. +func Convert_v1alpha1_GardenletConfiguration_To_config_GardenletConfiguration(in *GardenletConfiguration, out *config.GardenletConfiguration, s conversion.Scope) error { + return autoConvert_v1alpha1_GardenletConfiguration_To_config_GardenletConfiguration(in, out, s) +} + +func autoConvert_config_GardenletConfiguration_To_v1alpha1_GardenletConfiguration(in *config.GardenletConfiguration, out *GardenletConfiguration, s conversion.Scope) error { + if in.GardenClientConnection != nil { + in, out := &in.GardenClientConnection, &out.GardenClientConnection + *out = new(GardenClientConnection) + if err := Convert_config_GardenClientConnection_To_v1alpha1_GardenClientConnection(*in, *out, s); err != nil { + return err + } + } else { + out.GardenClientConnection = nil + } + if in.SeedClientConnection != nil { + in, out := &in.SeedClientConnection, &out.SeedClientConnection + *out = new(SeedClientConnection) + if err := Convert_config_SeedClientConnection_To_v1alpha1_SeedClientConnection(*in, *out, s); err != nil { + return err + } + } else { + out.SeedClientConnection = nil + } + if in.ShootClientConnection != nil { + in, out := &in.ShootClientConnection, &out.ShootClientConnection + *out = new(ShootClientConnection) + if err := Convert_config_ShootClientConnection_To_v1alpha1_ShootClientConnection(*in, *out, s); err != nil { + return err + } + } else { + out.ShootClientConnection = nil + } + out.Controllers = (*GardenletControllerConfiguration)(unsafe.Pointer(in.Controllers)) + out.Resources = (*ResourcesConfiguration)(unsafe.Pointer(in.Resources)) + if in.LeaderElection != nil { + in, out := &in.LeaderElection, &out.LeaderElection + *out = new(LeaderElectionConfiguration) + if err := Convert_config_LeaderElectionConfiguration_To_v1alpha1_LeaderElectionConfiguration(*in, *out, s); err != nil { + return err + } + } else { + out.LeaderElection = nil + } + out.LogLevel = (*string)(unsafe.Pointer(in.LogLevel)) + out.KubernetesLogLevel = (*klog.Level)(unsafe.Pointer(in.KubernetesLogLevel)) + out.Server = (*ServerConfiguration)(unsafe.Pointer(in.Server)) + out.FeatureGates = *(*map[string]bool)(unsafe.Pointer(&in.FeatureGates)) + if in.SeedConfig != nil { + in, out := &in.SeedConfig, &out.SeedConfig + *out = new(SeedConfig) + if err := Convert_config_SeedConfig_To_v1alpha1_SeedConfig(*in, *out, s); err != nil { + return err + } + } else { + out.SeedConfig = nil + } + out.SeedSelector = (*v1.LabelSelector)(unsafe.Pointer(in.SeedSelector)) + out.Logging = (*Logging)(unsafe.Pointer(in.Logging)) + out.SNI = (*SNI)(unsafe.Pointer(in.SNI)) + return nil +} + +// Convert_config_GardenletConfiguration_To_v1alpha1_GardenletConfiguration is an autogenerated conversion function. 
+func Convert_config_GardenletConfiguration_To_v1alpha1_GardenletConfiguration(in *config.GardenletConfiguration, out *GardenletConfiguration, s conversion.Scope) error { + return autoConvert_config_GardenletConfiguration_To_v1alpha1_GardenletConfiguration(in, out, s) +} + +func autoConvert_v1alpha1_GardenletControllerConfiguration_To_config_GardenletControllerConfiguration(in *GardenletControllerConfiguration, out *config.GardenletControllerConfiguration, s conversion.Scope) error { + out.BackupBucket = (*config.BackupBucketControllerConfiguration)(unsafe.Pointer(in.BackupBucket)) + out.BackupEntry = (*config.BackupEntryControllerConfiguration)(unsafe.Pointer(in.BackupEntry)) + out.ControllerInstallation = (*config.ControllerInstallationControllerConfiguration)(unsafe.Pointer(in.ControllerInstallation)) + out.ControllerInstallationCare = (*config.ControllerInstallationCareControllerConfiguration)(unsafe.Pointer(in.ControllerInstallationCare)) + out.ControllerInstallationRequired = (*config.ControllerInstallationRequiredControllerConfiguration)(unsafe.Pointer(in.ControllerInstallationRequired)) + out.Seed = (*config.SeedControllerConfiguration)(unsafe.Pointer(in.Seed)) + out.Shoot = (*config.ShootControllerConfiguration)(unsafe.Pointer(in.Shoot)) + out.ShootCare = (*config.ShootCareControllerConfiguration)(unsafe.Pointer(in.ShootCare)) + out.ShootStateSync = (*config.ShootStateSyncControllerConfiguration)(unsafe.Pointer(in.ShootStateSync)) + out.ShootedSeedRegistration = (*config.ShootedSeedRegistrationControllerConfiguration)(unsafe.Pointer(in.ShootedSeedRegistration)) + out.SeedAPIServerNetworkPolicy = (*config.SeedAPIServerNetworkPolicyControllerConfiguration)(unsafe.Pointer(in.SeedAPIServerNetworkPolicy)) + return nil +} + +// Convert_v1alpha1_GardenletControllerConfiguration_To_config_GardenletControllerConfiguration is an autogenerated conversion function. 
+func Convert_v1alpha1_GardenletControllerConfiguration_To_config_GardenletControllerConfiguration(in *GardenletControllerConfiguration, out *config.GardenletControllerConfiguration, s conversion.Scope) error { + return autoConvert_v1alpha1_GardenletControllerConfiguration_To_config_GardenletControllerConfiguration(in, out, s) +} + +func autoConvert_config_GardenletControllerConfiguration_To_v1alpha1_GardenletControllerConfiguration(in *config.GardenletControllerConfiguration, out *GardenletControllerConfiguration, s conversion.Scope) error { + out.BackupBucket = (*BackupBucketControllerConfiguration)(unsafe.Pointer(in.BackupBucket)) + out.BackupEntry = (*BackupEntryControllerConfiguration)(unsafe.Pointer(in.BackupEntry)) + out.ControllerInstallation = (*ControllerInstallationControllerConfiguration)(unsafe.Pointer(in.ControllerInstallation)) + out.ControllerInstallationCare = (*ControllerInstallationCareControllerConfiguration)(unsafe.Pointer(in.ControllerInstallationCare)) + out.ControllerInstallationRequired = (*ControllerInstallationRequiredControllerConfiguration)(unsafe.Pointer(in.ControllerInstallationRequired)) + out.Seed = (*SeedControllerConfiguration)(unsafe.Pointer(in.Seed)) + out.Shoot = (*ShootControllerConfiguration)(unsafe.Pointer(in.Shoot)) + out.ShootCare = (*ShootCareControllerConfiguration)(unsafe.Pointer(in.ShootCare)) + out.ShootStateSync = (*ShootStateSyncControllerConfiguration)(unsafe.Pointer(in.ShootStateSync)) + out.ShootedSeedRegistration = (*ShootedSeedRegistrationControllerConfiguration)(unsafe.Pointer(in.ShootedSeedRegistration)) + out.SeedAPIServerNetworkPolicy = (*SeedAPIServerNetworkPolicyControllerConfiguration)(unsafe.Pointer(in.SeedAPIServerNetworkPolicy)) + return nil +} + +// Convert_config_GardenletControllerConfiguration_To_v1alpha1_GardenletControllerConfiguration is an autogenerated conversion function. +func Convert_config_GardenletControllerConfiguration_To_v1alpha1_GardenletControllerConfiguration(in *config.GardenletControllerConfiguration, out *GardenletControllerConfiguration, s conversion.Scope) error { + return autoConvert_config_GardenletControllerConfiguration_To_v1alpha1_GardenletControllerConfiguration(in, out, s) +} + +func autoConvert_v1alpha1_HTTPSServer_To_config_HTTPSServer(in *HTTPSServer, out *config.HTTPSServer, s conversion.Scope) error { + if err := Convert_v1alpha1_Server_To_config_Server(&in.Server, &out.Server, s); err != nil { + return err + } + out.TLS = (*config.TLSServer)(unsafe.Pointer(in.TLS)) + return nil +} + +// Convert_v1alpha1_HTTPSServer_To_config_HTTPSServer is an autogenerated conversion function. +func Convert_v1alpha1_HTTPSServer_To_config_HTTPSServer(in *HTTPSServer, out *config.HTTPSServer, s conversion.Scope) error { + return autoConvert_v1alpha1_HTTPSServer_To_config_HTTPSServer(in, out, s) +} + +func autoConvert_config_HTTPSServer_To_v1alpha1_HTTPSServer(in *config.HTTPSServer, out *HTTPSServer, s conversion.Scope) error { + if err := Convert_config_Server_To_v1alpha1_Server(&in.Server, &out.Server, s); err != nil { + return err + } + out.TLS = (*TLSServer)(unsafe.Pointer(in.TLS)) + return nil +} + +// Convert_config_HTTPSServer_To_v1alpha1_HTTPSServer is an autogenerated conversion function. 
+func Convert_config_HTTPSServer_To_v1alpha1_HTTPSServer(in *config.HTTPSServer, out *HTTPSServer, s conversion.Scope) error { + return autoConvert_config_HTTPSServer_To_v1alpha1_HTTPSServer(in, out, s) +} + +func autoConvert_v1alpha1_LeaderElectionConfiguration_To_config_LeaderElectionConfiguration(in *LeaderElectionConfiguration, out *config.LeaderElectionConfiguration, s conversion.Scope) error { + if err := configv1alpha1.Convert_v1alpha1_LeaderElectionConfiguration_To_config_LeaderElectionConfiguration(&in.LeaderElectionConfiguration, &out.LeaderElectionConfiguration, s); err != nil { + return err + } + out.LockObjectNamespace = (*string)(unsafe.Pointer(in.LockObjectNamespace)) + out.LockObjectName = (*string)(unsafe.Pointer(in.LockObjectName)) + return nil +} + +// Convert_v1alpha1_LeaderElectionConfiguration_To_config_LeaderElectionConfiguration is an autogenerated conversion function. +func Convert_v1alpha1_LeaderElectionConfiguration_To_config_LeaderElectionConfiguration(in *LeaderElectionConfiguration, out *config.LeaderElectionConfiguration, s conversion.Scope) error { + return autoConvert_v1alpha1_LeaderElectionConfiguration_To_config_LeaderElectionConfiguration(in, out, s) +} + +func autoConvert_config_LeaderElectionConfiguration_To_v1alpha1_LeaderElectionConfiguration(in *config.LeaderElectionConfiguration, out *LeaderElectionConfiguration, s conversion.Scope) error { + if err := configv1alpha1.Convert_config_LeaderElectionConfiguration_To_v1alpha1_LeaderElectionConfiguration(&in.LeaderElectionConfiguration, &out.LeaderElectionConfiguration, s); err != nil { + return err + } + out.LockObjectNamespace = (*string)(unsafe.Pointer(in.LockObjectNamespace)) + out.LockObjectName = (*string)(unsafe.Pointer(in.LockObjectName)) + return nil +} + +// Convert_config_LeaderElectionConfiguration_To_v1alpha1_LeaderElectionConfiguration is an autogenerated conversion function. +func Convert_config_LeaderElectionConfiguration_To_v1alpha1_LeaderElectionConfiguration(in *config.LeaderElectionConfiguration, out *LeaderElectionConfiguration, s conversion.Scope) error { + return autoConvert_config_LeaderElectionConfiguration_To_v1alpha1_LeaderElectionConfiguration(in, out, s) +} + +func autoConvert_v1alpha1_Logging_To_config_Logging(in *Logging, out *config.Logging, s conversion.Scope) error { + out.FluentBit = (*config.FluentBit)(unsafe.Pointer(in.FluentBit)) + return nil +} + +// Convert_v1alpha1_Logging_To_config_Logging is an autogenerated conversion function. +func Convert_v1alpha1_Logging_To_config_Logging(in *Logging, out *config.Logging, s conversion.Scope) error { + return autoConvert_v1alpha1_Logging_To_config_Logging(in, out, s) +} + +func autoConvert_config_Logging_To_v1alpha1_Logging(in *config.Logging, out *Logging, s conversion.Scope) error { + out.FluentBit = (*FluentBit)(unsafe.Pointer(in.FluentBit)) + return nil +} + +// Convert_config_Logging_To_v1alpha1_Logging is an autogenerated conversion function. 
+func Convert_config_Logging_To_v1alpha1_Logging(in *config.Logging, out *Logging, s conversion.Scope) error { + return autoConvert_config_Logging_To_v1alpha1_Logging(in, out, s) +} + +func autoConvert_v1alpha1_ResourcesConfiguration_To_config_ResourcesConfiguration(in *ResourcesConfiguration, out *config.ResourcesConfiguration, s conversion.Scope) error { + out.Capacity = *(*corev1.ResourceList)(unsafe.Pointer(&in.Capacity)) + out.Reserved = *(*corev1.ResourceList)(unsafe.Pointer(&in.Reserved)) + return nil +} + +// Convert_v1alpha1_ResourcesConfiguration_To_config_ResourcesConfiguration is an autogenerated conversion function. +func Convert_v1alpha1_ResourcesConfiguration_To_config_ResourcesConfiguration(in *ResourcesConfiguration, out *config.ResourcesConfiguration, s conversion.Scope) error { + return autoConvert_v1alpha1_ResourcesConfiguration_To_config_ResourcesConfiguration(in, out, s) +} + +func autoConvert_config_ResourcesConfiguration_To_v1alpha1_ResourcesConfiguration(in *config.ResourcesConfiguration, out *ResourcesConfiguration, s conversion.Scope) error { + out.Capacity = *(*corev1.ResourceList)(unsafe.Pointer(&in.Capacity)) + out.Reserved = *(*corev1.ResourceList)(unsafe.Pointer(&in.Reserved)) + return nil +} + +// Convert_config_ResourcesConfiguration_To_v1alpha1_ResourcesConfiguration is an autogenerated conversion function. +func Convert_config_ResourcesConfiguration_To_v1alpha1_ResourcesConfiguration(in *config.ResourcesConfiguration, out *ResourcesConfiguration, s conversion.Scope) error { + return autoConvert_config_ResourcesConfiguration_To_v1alpha1_ResourcesConfiguration(in, out, s) +} + +func autoConvert_v1alpha1_SNI_To_config_SNI(in *SNI, out *config.SNI, s conversion.Scope) error { + out.Ingress = (*config.SNIIngress)(unsafe.Pointer(in.Ingress)) + return nil +} + +// Convert_v1alpha1_SNI_To_config_SNI is an autogenerated conversion function. +func Convert_v1alpha1_SNI_To_config_SNI(in *SNI, out *config.SNI, s conversion.Scope) error { + return autoConvert_v1alpha1_SNI_To_config_SNI(in, out, s) +} + +func autoConvert_config_SNI_To_v1alpha1_SNI(in *config.SNI, out *SNI, s conversion.Scope) error { + out.Ingress = (*SNIIngress)(unsafe.Pointer(in.Ingress)) + return nil +} + +// Convert_config_SNI_To_v1alpha1_SNI is an autogenerated conversion function. +func Convert_config_SNI_To_v1alpha1_SNI(in *config.SNI, out *SNI, s conversion.Scope) error { + return autoConvert_config_SNI_To_v1alpha1_SNI(in, out, s) +} + +func autoConvert_v1alpha1_SNIIngress_To_config_SNIIngress(in *SNIIngress, out *config.SNIIngress, s conversion.Scope) error { + out.ServiceName = (*string)(unsafe.Pointer(in.ServiceName)) + out.Namespace = (*string)(unsafe.Pointer(in.Namespace)) + out.Labels = *(*map[string]string)(unsafe.Pointer(&in.Labels)) + return nil +} + +// Convert_v1alpha1_SNIIngress_To_config_SNIIngress is an autogenerated conversion function. +func Convert_v1alpha1_SNIIngress_To_config_SNIIngress(in *SNIIngress, out *config.SNIIngress, s conversion.Scope) error { + return autoConvert_v1alpha1_SNIIngress_To_config_SNIIngress(in, out, s) +} + +func autoConvert_config_SNIIngress_To_v1alpha1_SNIIngress(in *config.SNIIngress, out *SNIIngress, s conversion.Scope) error { + out.ServiceName = (*string)(unsafe.Pointer(in.ServiceName)) + out.Namespace = (*string)(unsafe.Pointer(in.Namespace)) + out.Labels = *(*map[string]string)(unsafe.Pointer(&in.Labels)) + return nil +} + +// Convert_config_SNIIngress_To_v1alpha1_SNIIngress is an autogenerated conversion function. 
+func Convert_config_SNIIngress_To_v1alpha1_SNIIngress(in *config.SNIIngress, out *SNIIngress, s conversion.Scope) error { + return autoConvert_config_SNIIngress_To_v1alpha1_SNIIngress(in, out, s) +} + +func autoConvert_v1alpha1_SeedAPIServerNetworkPolicyControllerConfiguration_To_config_SeedAPIServerNetworkPolicyControllerConfiguration(in *SeedAPIServerNetworkPolicyControllerConfiguration, out *config.SeedAPIServerNetworkPolicyControllerConfiguration, s conversion.Scope) error { + out.ConcurrentSyncs = (*int)(unsafe.Pointer(in.ConcurrentSyncs)) + return nil +} + +// Convert_v1alpha1_SeedAPIServerNetworkPolicyControllerConfiguration_To_config_SeedAPIServerNetworkPolicyControllerConfiguration is an autogenerated conversion function. +func Convert_v1alpha1_SeedAPIServerNetworkPolicyControllerConfiguration_To_config_SeedAPIServerNetworkPolicyControllerConfiguration(in *SeedAPIServerNetworkPolicyControllerConfiguration, out *config.SeedAPIServerNetworkPolicyControllerConfiguration, s conversion.Scope) error { + return autoConvert_v1alpha1_SeedAPIServerNetworkPolicyControllerConfiguration_To_config_SeedAPIServerNetworkPolicyControllerConfiguration(in, out, s) +} + +func autoConvert_config_SeedAPIServerNetworkPolicyControllerConfiguration_To_v1alpha1_SeedAPIServerNetworkPolicyControllerConfiguration(in *config.SeedAPIServerNetworkPolicyControllerConfiguration, out *SeedAPIServerNetworkPolicyControllerConfiguration, s conversion.Scope) error { + out.ConcurrentSyncs = (*int)(unsafe.Pointer(in.ConcurrentSyncs)) + return nil +} + +// Convert_config_SeedAPIServerNetworkPolicyControllerConfiguration_To_v1alpha1_SeedAPIServerNetworkPolicyControllerConfiguration is an autogenerated conversion function. +func Convert_config_SeedAPIServerNetworkPolicyControllerConfiguration_To_v1alpha1_SeedAPIServerNetworkPolicyControllerConfiguration(in *config.SeedAPIServerNetworkPolicyControllerConfiguration, out *SeedAPIServerNetworkPolicyControllerConfiguration, s conversion.Scope) error { + return autoConvert_config_SeedAPIServerNetworkPolicyControllerConfiguration_To_v1alpha1_SeedAPIServerNetworkPolicyControllerConfiguration(in, out, s) +} + +func autoConvert_v1alpha1_SeedClientConnection_To_config_SeedClientConnection(in *SeedClientConnection, out *config.SeedClientConnection, s conversion.Scope) error { + if err := configv1alpha1.Convert_v1alpha1_ClientConnectionConfiguration_To_config_ClientConnectionConfiguration(&in.ClientConnectionConfiguration, &out.ClientConnectionConfiguration, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha1_SeedClientConnection_To_config_SeedClientConnection is an autogenerated conversion function. +func Convert_v1alpha1_SeedClientConnection_To_config_SeedClientConnection(in *SeedClientConnection, out *config.SeedClientConnection, s conversion.Scope) error { + return autoConvert_v1alpha1_SeedClientConnection_To_config_SeedClientConnection(in, out, s) +} + +func autoConvert_config_SeedClientConnection_To_v1alpha1_SeedClientConnection(in *config.SeedClientConnection, out *SeedClientConnection, s conversion.Scope) error { + if err := configv1alpha1.Convert_config_ClientConnectionConfiguration_To_v1alpha1_ClientConnectionConfiguration(&in.ClientConnectionConfiguration, &out.ClientConnectionConfiguration, s); err != nil { + return err + } + return nil +} + +// Convert_config_SeedClientConnection_To_v1alpha1_SeedClientConnection is an autogenerated conversion function. 
+func Convert_config_SeedClientConnection_To_v1alpha1_SeedClientConnection(in *config.SeedClientConnection, out *SeedClientConnection, s conversion.Scope) error { + return autoConvert_config_SeedClientConnection_To_v1alpha1_SeedClientConnection(in, out, s) +} + +func autoConvert_v1alpha1_SeedConfig_To_config_SeedConfig(in *SeedConfig, out *config.SeedConfig, s conversion.Scope) error { + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(&in.SeedTemplate, &out.SeedTemplate, 0); err != nil { + return err + } + return nil +} + +// Convert_v1alpha1_SeedConfig_To_config_SeedConfig is an autogenerated conversion function. +func Convert_v1alpha1_SeedConfig_To_config_SeedConfig(in *SeedConfig, out *config.SeedConfig, s conversion.Scope) error { + return autoConvert_v1alpha1_SeedConfig_To_config_SeedConfig(in, out, s) +} + +func autoConvert_config_SeedConfig_To_v1alpha1_SeedConfig(in *config.SeedConfig, out *SeedConfig, s conversion.Scope) error { + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(&in.SeedTemplate, &out.SeedTemplate, 0); err != nil { + return err + } + return nil +} + +// Convert_config_SeedConfig_To_v1alpha1_SeedConfig is an autogenerated conversion function. +func Convert_config_SeedConfig_To_v1alpha1_SeedConfig(in *config.SeedConfig, out *SeedConfig, s conversion.Scope) error { + return autoConvert_config_SeedConfig_To_v1alpha1_SeedConfig(in, out, s) +} + +func autoConvert_v1alpha1_SeedControllerConfiguration_To_config_SeedControllerConfiguration(in *SeedControllerConfiguration, out *config.SeedControllerConfiguration, s conversion.Scope) error { + out.ConcurrentSyncs = (*int)(unsafe.Pointer(in.ConcurrentSyncs)) + out.SyncPeriod = (*v1.Duration)(unsafe.Pointer(in.SyncPeriod)) + return nil +} + +// Convert_v1alpha1_SeedControllerConfiguration_To_config_SeedControllerConfiguration is an autogenerated conversion function. +func Convert_v1alpha1_SeedControllerConfiguration_To_config_SeedControllerConfiguration(in *SeedControllerConfiguration, out *config.SeedControllerConfiguration, s conversion.Scope) error { + return autoConvert_v1alpha1_SeedControllerConfiguration_To_config_SeedControllerConfiguration(in, out, s) +} + +func autoConvert_config_SeedControllerConfiguration_To_v1alpha1_SeedControllerConfiguration(in *config.SeedControllerConfiguration, out *SeedControllerConfiguration, s conversion.Scope) error { + out.ConcurrentSyncs = (*int)(unsafe.Pointer(in.ConcurrentSyncs)) + out.SyncPeriod = (*v1.Duration)(unsafe.Pointer(in.SyncPeriod)) + return nil +} + +// Convert_config_SeedControllerConfiguration_To_v1alpha1_SeedControllerConfiguration is an autogenerated conversion function. +func Convert_config_SeedControllerConfiguration_To_v1alpha1_SeedControllerConfiguration(in *config.SeedControllerConfiguration, out *SeedControllerConfiguration, s conversion.Scope) error { + return autoConvert_config_SeedControllerConfiguration_To_v1alpha1_SeedControllerConfiguration(in, out, s) +} + +func autoConvert_v1alpha1_Server_To_config_Server(in *Server, out *config.Server, s conversion.Scope) error { + out.BindAddress = in.BindAddress + out.Port = in.Port + return nil +} + +// Convert_v1alpha1_Server_To_config_Server is an autogenerated conversion function. 
+func Convert_v1alpha1_Server_To_config_Server(in *Server, out *config.Server, s conversion.Scope) error { + return autoConvert_v1alpha1_Server_To_config_Server(in, out, s) +} + +func autoConvert_config_Server_To_v1alpha1_Server(in *config.Server, out *Server, s conversion.Scope) error { + out.BindAddress = in.BindAddress + out.Port = in.Port + return nil +} + +// Convert_config_Server_To_v1alpha1_Server is an autogenerated conversion function. +func Convert_config_Server_To_v1alpha1_Server(in *config.Server, out *Server, s conversion.Scope) error { + return autoConvert_config_Server_To_v1alpha1_Server(in, out, s) +} + +func autoConvert_v1alpha1_ServerConfiguration_To_config_ServerConfiguration(in *ServerConfiguration, out *config.ServerConfiguration, s conversion.Scope) error { + if err := Convert_v1alpha1_HTTPSServer_To_config_HTTPSServer(&in.HTTPS, &out.HTTPS, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha1_ServerConfiguration_To_config_ServerConfiguration is an autogenerated conversion function. +func Convert_v1alpha1_ServerConfiguration_To_config_ServerConfiguration(in *ServerConfiguration, out *config.ServerConfiguration, s conversion.Scope) error { + return autoConvert_v1alpha1_ServerConfiguration_To_config_ServerConfiguration(in, out, s) +} + +func autoConvert_config_ServerConfiguration_To_v1alpha1_ServerConfiguration(in *config.ServerConfiguration, out *ServerConfiguration, s conversion.Scope) error { + if err := Convert_config_HTTPSServer_To_v1alpha1_HTTPSServer(&in.HTTPS, &out.HTTPS, s); err != nil { + return err + } + return nil +} + +// Convert_config_ServerConfiguration_To_v1alpha1_ServerConfiguration is an autogenerated conversion function. +func Convert_config_ServerConfiguration_To_v1alpha1_ServerConfiguration(in *config.ServerConfiguration, out *ServerConfiguration, s conversion.Scope) error { + return autoConvert_config_ServerConfiguration_To_v1alpha1_ServerConfiguration(in, out, s) +} + +func autoConvert_v1alpha1_ShootCareControllerConfiguration_To_config_ShootCareControllerConfiguration(in *ShootCareControllerConfiguration, out *config.ShootCareControllerConfiguration, s conversion.Scope) error { + out.ConcurrentSyncs = (*int)(unsafe.Pointer(in.ConcurrentSyncs)) + out.SyncPeriod = (*v1.Duration)(unsafe.Pointer(in.SyncPeriod)) + out.StaleExtensionHealthChecks = (*config.StaleExtensionHealthChecks)(unsafe.Pointer(in.StaleExtensionHealthChecks)) + out.ConditionThresholds = *(*[]config.ConditionThreshold)(unsafe.Pointer(&in.ConditionThresholds)) + return nil +} + +// Convert_v1alpha1_ShootCareControllerConfiguration_To_config_ShootCareControllerConfiguration is an autogenerated conversion function. 
+func Convert_v1alpha1_ShootCareControllerConfiguration_To_config_ShootCareControllerConfiguration(in *ShootCareControllerConfiguration, out *config.ShootCareControllerConfiguration, s conversion.Scope) error { + return autoConvert_v1alpha1_ShootCareControllerConfiguration_To_config_ShootCareControllerConfiguration(in, out, s) +} + +func autoConvert_config_ShootCareControllerConfiguration_To_v1alpha1_ShootCareControllerConfiguration(in *config.ShootCareControllerConfiguration, out *ShootCareControllerConfiguration, s conversion.Scope) error { + out.ConcurrentSyncs = (*int)(unsafe.Pointer(in.ConcurrentSyncs)) + out.SyncPeriod = (*v1.Duration)(unsafe.Pointer(in.SyncPeriod)) + out.StaleExtensionHealthChecks = (*StaleExtensionHealthChecks)(unsafe.Pointer(in.StaleExtensionHealthChecks)) + out.ConditionThresholds = *(*[]ConditionThreshold)(unsafe.Pointer(&in.ConditionThresholds)) + return nil +} + +// Convert_config_ShootCareControllerConfiguration_To_v1alpha1_ShootCareControllerConfiguration is an autogenerated conversion function. +func Convert_config_ShootCareControllerConfiguration_To_v1alpha1_ShootCareControllerConfiguration(in *config.ShootCareControllerConfiguration, out *ShootCareControllerConfiguration, s conversion.Scope) error { + return autoConvert_config_ShootCareControllerConfiguration_To_v1alpha1_ShootCareControllerConfiguration(in, out, s) +} + +func autoConvert_v1alpha1_ShootClientConnection_To_config_ShootClientConnection(in *ShootClientConnection, out *config.ShootClientConnection, s conversion.Scope) error { + if err := configv1alpha1.Convert_v1alpha1_ClientConnectionConfiguration_To_config_ClientConnectionConfiguration(&in.ClientConnectionConfiguration, &out.ClientConnectionConfiguration, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha1_ShootClientConnection_To_config_ShootClientConnection is an autogenerated conversion function. +func Convert_v1alpha1_ShootClientConnection_To_config_ShootClientConnection(in *ShootClientConnection, out *config.ShootClientConnection, s conversion.Scope) error { + return autoConvert_v1alpha1_ShootClientConnection_To_config_ShootClientConnection(in, out, s) +} + +func autoConvert_config_ShootClientConnection_To_v1alpha1_ShootClientConnection(in *config.ShootClientConnection, out *ShootClientConnection, s conversion.Scope) error { + if err := configv1alpha1.Convert_config_ClientConnectionConfiguration_To_v1alpha1_ClientConnectionConfiguration(&in.ClientConnectionConfiguration, &out.ClientConnectionConfiguration, s); err != nil { + return err + } + return nil +} + +// Convert_config_ShootClientConnection_To_v1alpha1_ShootClientConnection is an autogenerated conversion function. 
+func Convert_config_ShootClientConnection_To_v1alpha1_ShootClientConnection(in *config.ShootClientConnection, out *ShootClientConnection, s conversion.Scope) error { + return autoConvert_config_ShootClientConnection_To_v1alpha1_ShootClientConnection(in, out, s) +} + +func autoConvert_v1alpha1_ShootControllerConfiguration_To_config_ShootControllerConfiguration(in *ShootControllerConfiguration, out *config.ShootControllerConfiguration, s conversion.Scope) error { + out.ConcurrentSyncs = (*int)(unsafe.Pointer(in.ConcurrentSyncs)) + out.ProgressReportPeriod = (*v1.Duration)(unsafe.Pointer(in.ProgressReportPeriod)) + out.ReconcileInMaintenanceOnly = (*bool)(unsafe.Pointer(in.ReconcileInMaintenanceOnly)) + out.RespectSyncPeriodOverwrite = (*bool)(unsafe.Pointer(in.RespectSyncPeriodOverwrite)) + out.RetryDuration = (*v1.Duration)(unsafe.Pointer(in.RetryDuration)) + out.SyncPeriod = (*v1.Duration)(unsafe.Pointer(in.SyncPeriod)) + out.DNSEntryTTLSeconds = (*int64)(unsafe.Pointer(in.DNSEntryTTLSeconds)) + return nil +} + +// Convert_v1alpha1_ShootControllerConfiguration_To_config_ShootControllerConfiguration is an autogenerated conversion function. +func Convert_v1alpha1_ShootControllerConfiguration_To_config_ShootControllerConfiguration(in *ShootControllerConfiguration, out *config.ShootControllerConfiguration, s conversion.Scope) error { + return autoConvert_v1alpha1_ShootControllerConfiguration_To_config_ShootControllerConfiguration(in, out, s) +} + +func autoConvert_config_ShootControllerConfiguration_To_v1alpha1_ShootControllerConfiguration(in *config.ShootControllerConfiguration, out *ShootControllerConfiguration, s conversion.Scope) error { + out.ConcurrentSyncs = (*int)(unsafe.Pointer(in.ConcurrentSyncs)) + out.ProgressReportPeriod = (*v1.Duration)(unsafe.Pointer(in.ProgressReportPeriod)) + out.ReconcileInMaintenanceOnly = (*bool)(unsafe.Pointer(in.ReconcileInMaintenanceOnly)) + out.RespectSyncPeriodOverwrite = (*bool)(unsafe.Pointer(in.RespectSyncPeriodOverwrite)) + out.RetryDuration = (*v1.Duration)(unsafe.Pointer(in.RetryDuration)) + out.SyncPeriod = (*v1.Duration)(unsafe.Pointer(in.SyncPeriod)) + out.DNSEntryTTLSeconds = (*int64)(unsafe.Pointer(in.DNSEntryTTLSeconds)) + return nil +} + +// Convert_config_ShootControllerConfiguration_To_v1alpha1_ShootControllerConfiguration is an autogenerated conversion function. +func Convert_config_ShootControllerConfiguration_To_v1alpha1_ShootControllerConfiguration(in *config.ShootControllerConfiguration, out *ShootControllerConfiguration, s conversion.Scope) error { + return autoConvert_config_ShootControllerConfiguration_To_v1alpha1_ShootControllerConfiguration(in, out, s) +} + +func autoConvert_v1alpha1_ShootStateSyncControllerConfiguration_To_config_ShootStateSyncControllerConfiguration(in *ShootStateSyncControllerConfiguration, out *config.ShootStateSyncControllerConfiguration, s conversion.Scope) error { + out.ConcurrentSyncs = (*int)(unsafe.Pointer(in.ConcurrentSyncs)) + out.SyncPeriod = (*v1.Duration)(unsafe.Pointer(in.SyncPeriod)) + return nil +} + +// Convert_v1alpha1_ShootStateSyncControllerConfiguration_To_config_ShootStateSyncControllerConfiguration is an autogenerated conversion function. 
+func Convert_v1alpha1_ShootStateSyncControllerConfiguration_To_config_ShootStateSyncControllerConfiguration(in *ShootStateSyncControllerConfiguration, out *config.ShootStateSyncControllerConfiguration, s conversion.Scope) error { + return autoConvert_v1alpha1_ShootStateSyncControllerConfiguration_To_config_ShootStateSyncControllerConfiguration(in, out, s) +} + +func autoConvert_config_ShootStateSyncControllerConfiguration_To_v1alpha1_ShootStateSyncControllerConfiguration(in *config.ShootStateSyncControllerConfiguration, out *ShootStateSyncControllerConfiguration, s conversion.Scope) error { + out.ConcurrentSyncs = (*int)(unsafe.Pointer(in.ConcurrentSyncs)) + out.SyncPeriod = (*v1.Duration)(unsafe.Pointer(in.SyncPeriod)) + return nil +} + +// Convert_config_ShootStateSyncControllerConfiguration_To_v1alpha1_ShootStateSyncControllerConfiguration is an autogenerated conversion function. +func Convert_config_ShootStateSyncControllerConfiguration_To_v1alpha1_ShootStateSyncControllerConfiguration(in *config.ShootStateSyncControllerConfiguration, out *ShootStateSyncControllerConfiguration, s conversion.Scope) error { + return autoConvert_config_ShootStateSyncControllerConfiguration_To_v1alpha1_ShootStateSyncControllerConfiguration(in, out, s) +} + +func autoConvert_v1alpha1_ShootedSeedRegistrationControllerConfiguration_To_config_ShootedSeedRegistrationControllerConfiguration(in *ShootedSeedRegistrationControllerConfiguration, out *config.ShootedSeedRegistrationControllerConfiguration, s conversion.Scope) error { + out.SyncJitterPeriod = (*v1.Duration)(unsafe.Pointer(in.SyncJitterPeriod)) + return nil +} + +// Convert_v1alpha1_ShootedSeedRegistrationControllerConfiguration_To_config_ShootedSeedRegistrationControllerConfiguration is an autogenerated conversion function. +func Convert_v1alpha1_ShootedSeedRegistrationControllerConfiguration_To_config_ShootedSeedRegistrationControllerConfiguration(in *ShootedSeedRegistrationControllerConfiguration, out *config.ShootedSeedRegistrationControllerConfiguration, s conversion.Scope) error { + return autoConvert_v1alpha1_ShootedSeedRegistrationControllerConfiguration_To_config_ShootedSeedRegistrationControllerConfiguration(in, out, s) +} + +func autoConvert_config_ShootedSeedRegistrationControllerConfiguration_To_v1alpha1_ShootedSeedRegistrationControllerConfiguration(in *config.ShootedSeedRegistrationControllerConfiguration, out *ShootedSeedRegistrationControllerConfiguration, s conversion.Scope) error { + out.SyncJitterPeriod = (*v1.Duration)(unsafe.Pointer(in.SyncJitterPeriod)) + return nil +} + +// Convert_config_ShootedSeedRegistrationControllerConfiguration_To_v1alpha1_ShootedSeedRegistrationControllerConfiguration is an autogenerated conversion function. 
+func Convert_config_ShootedSeedRegistrationControllerConfiguration_To_v1alpha1_ShootedSeedRegistrationControllerConfiguration(in *config.ShootedSeedRegistrationControllerConfiguration, out *ShootedSeedRegistrationControllerConfiguration, s conversion.Scope) error { + return autoConvert_config_ShootedSeedRegistrationControllerConfiguration_To_v1alpha1_ShootedSeedRegistrationControllerConfiguration(in, out, s) +} + +func autoConvert_v1alpha1_StaleExtensionHealthChecks_To_config_StaleExtensionHealthChecks(in *StaleExtensionHealthChecks, out *config.StaleExtensionHealthChecks, s conversion.Scope) error { + out.Enabled = in.Enabled + out.Threshold = (*v1.Duration)(unsafe.Pointer(in.Threshold)) + return nil +} + +// Convert_v1alpha1_StaleExtensionHealthChecks_To_config_StaleExtensionHealthChecks is an autogenerated conversion function. +func Convert_v1alpha1_StaleExtensionHealthChecks_To_config_StaleExtensionHealthChecks(in *StaleExtensionHealthChecks, out *config.StaleExtensionHealthChecks, s conversion.Scope) error { + return autoConvert_v1alpha1_StaleExtensionHealthChecks_To_config_StaleExtensionHealthChecks(in, out, s) +} + +func autoConvert_config_StaleExtensionHealthChecks_To_v1alpha1_StaleExtensionHealthChecks(in *config.StaleExtensionHealthChecks, out *StaleExtensionHealthChecks, s conversion.Scope) error { + out.Enabled = in.Enabled + out.Threshold = (*v1.Duration)(unsafe.Pointer(in.Threshold)) + return nil +} + +// Convert_config_StaleExtensionHealthChecks_To_v1alpha1_StaleExtensionHealthChecks is an autogenerated conversion function. +func Convert_config_StaleExtensionHealthChecks_To_v1alpha1_StaleExtensionHealthChecks(in *config.StaleExtensionHealthChecks, out *StaleExtensionHealthChecks, s conversion.Scope) error { + return autoConvert_config_StaleExtensionHealthChecks_To_v1alpha1_StaleExtensionHealthChecks(in, out, s) +} + +func autoConvert_v1alpha1_TLSServer_To_config_TLSServer(in *TLSServer, out *config.TLSServer, s conversion.Scope) error { + out.ServerCertPath = in.ServerCertPath + out.ServerKeyPath = in.ServerKeyPath + return nil +} + +// Convert_v1alpha1_TLSServer_To_config_TLSServer is an autogenerated conversion function. +func Convert_v1alpha1_TLSServer_To_config_TLSServer(in *TLSServer, out *config.TLSServer, s conversion.Scope) error { + return autoConvert_v1alpha1_TLSServer_To_config_TLSServer(in, out, s) +} + +func autoConvert_config_TLSServer_To_v1alpha1_TLSServer(in *config.TLSServer, out *TLSServer, s conversion.Scope) error { + out.ServerCertPath = in.ServerCertPath + out.ServerKeyPath = in.ServerKeyPath + return nil +} + +// Convert_config_TLSServer_To_v1alpha1_TLSServer is an autogenerated conversion function. +func Convert_config_TLSServer_To_v1alpha1_TLSServer(in *config.TLSServer, out *TLSServer, s conversion.Scope) error { + return autoConvert_config_TLSServer_To_v1alpha1_TLSServer(in, out, s) +} diff --git a/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000..d668adf7d --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,852 @@ +// +build !ignore_autogenerated + +/* +Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 
2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + klog "k8s.io/klog" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupBucketControllerConfiguration) DeepCopyInto(out *BackupBucketControllerConfiguration) { + *out = *in + if in.ConcurrentSyncs != nil { + in, out := &in.ConcurrentSyncs, &out.ConcurrentSyncs + *out = new(int) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupBucketControllerConfiguration. +func (in *BackupBucketControllerConfiguration) DeepCopy() *BackupBucketControllerConfiguration { + if in == nil { + return nil + } + out := new(BackupBucketControllerConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupEntryControllerConfiguration) DeepCopyInto(out *BackupEntryControllerConfiguration) { + *out = *in + if in.ConcurrentSyncs != nil { + in, out := &in.ConcurrentSyncs, &out.ConcurrentSyncs + *out = new(int) + **out = **in + } + if in.DeletionGracePeriodHours != nil { + in, out := &in.DeletionGracePeriodHours, &out.DeletionGracePeriodHours + *out = new(int) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupEntryControllerConfiguration. +func (in *BackupEntryControllerConfiguration) DeepCopy() *BackupEntryControllerConfiguration { + if in == nil { + return nil + } + out := new(BackupEntryControllerConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionThreshold) DeepCopyInto(out *ConditionThreshold) { + *out = *in + out.Duration = in.Duration + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionThreshold. +func (in *ConditionThreshold) DeepCopy() *ConditionThreshold { + if in == nil { + return nil + } + out := new(ConditionThreshold) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ControllerInstallationCareControllerConfiguration) DeepCopyInto(out *ControllerInstallationCareControllerConfiguration) { + *out = *in + if in.ConcurrentSyncs != nil { + in, out := &in.ConcurrentSyncs, &out.ConcurrentSyncs + *out = new(int) + **out = **in + } + if in.SyncPeriod != nil { + in, out := &in.SyncPeriod, &out.SyncPeriod + *out = new(v1.Duration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerInstallationCareControllerConfiguration. +func (in *ControllerInstallationCareControllerConfiguration) DeepCopy() *ControllerInstallationCareControllerConfiguration { + if in == nil { + return nil + } + out := new(ControllerInstallationCareControllerConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerInstallationControllerConfiguration) DeepCopyInto(out *ControllerInstallationControllerConfiguration) { + *out = *in + if in.ConcurrentSyncs != nil { + in, out := &in.ConcurrentSyncs, &out.ConcurrentSyncs + *out = new(int) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerInstallationControllerConfiguration. +func (in *ControllerInstallationControllerConfiguration) DeepCopy() *ControllerInstallationControllerConfiguration { + if in == nil { + return nil + } + out := new(ControllerInstallationControllerConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerInstallationRequiredControllerConfiguration) DeepCopyInto(out *ControllerInstallationRequiredControllerConfiguration) { + *out = *in + if in.ConcurrentSyncs != nil { + in, out := &in.ConcurrentSyncs, &out.ConcurrentSyncs + *out = new(int) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerInstallationRequiredControllerConfiguration. +func (in *ControllerInstallationRequiredControllerConfiguration) DeepCopy() *ControllerInstallationRequiredControllerConfiguration { + if in == nil { + return nil + } + out := new(ControllerInstallationRequiredControllerConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FluentBit) DeepCopyInto(out *FluentBit) { + *out = *in + if in.ServiceSection != nil { + in, out := &in.ServiceSection, &out.ServiceSection + *out = new(string) + **out = **in + } + if in.InputSection != nil { + in, out := &in.InputSection, &out.InputSection + *out = new(string) + **out = **in + } + if in.OutputSection != nil { + in, out := &in.OutputSection, &out.OutputSection + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FluentBit. +func (in *FluentBit) DeepCopy() *FluentBit { + if in == nil { + return nil + } + out := new(FluentBit) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GardenClientConnection) DeepCopyInto(out *GardenClientConnection) { + *out = *in + out.ClientConnectionConfiguration = in.ClientConnectionConfiguration + if in.GardenClusterAddress != nil { + in, out := &in.GardenClusterAddress, &out.GardenClusterAddress + *out = new(string) + **out = **in + } + if in.GardenClusterCACert != nil { + in, out := &in.GardenClusterCACert, &out.GardenClusterCACert + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.BootstrapKubeconfig != nil { + in, out := &in.BootstrapKubeconfig, &out.BootstrapKubeconfig + *out = new(corev1.SecretReference) + **out = **in + } + if in.KubeconfigSecret != nil { + in, out := &in.KubeconfigSecret, &out.KubeconfigSecret + *out = new(corev1.SecretReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GardenClientConnection. +func (in *GardenClientConnection) DeepCopy() *GardenClientConnection { + if in == nil { + return nil + } + out := new(GardenClientConnection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GardenletConfiguration) DeepCopyInto(out *GardenletConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.GardenClientConnection != nil { + in, out := &in.GardenClientConnection, &out.GardenClientConnection + *out = new(GardenClientConnection) + (*in).DeepCopyInto(*out) + } + if in.SeedClientConnection != nil { + in, out := &in.SeedClientConnection, &out.SeedClientConnection + *out = new(SeedClientConnection) + **out = **in + } + if in.ShootClientConnection != nil { + in, out := &in.ShootClientConnection, &out.ShootClientConnection + *out = new(ShootClientConnection) + **out = **in + } + if in.Controllers != nil { + in, out := &in.Controllers, &out.Controllers + *out = new(GardenletControllerConfiguration) + (*in).DeepCopyInto(*out) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(ResourcesConfiguration) + (*in).DeepCopyInto(*out) + } + if in.LeaderElection != nil { + in, out := &in.LeaderElection, &out.LeaderElection + *out = new(LeaderElectionConfiguration) + (*in).DeepCopyInto(*out) + } + if in.LogLevel != nil { + in, out := &in.LogLevel, &out.LogLevel + *out = new(string) + **out = **in + } + if in.KubernetesLogLevel != nil { + in, out := &in.KubernetesLogLevel, &out.KubernetesLogLevel + *out = new(klog.Level) + **out = **in + } + if in.Server != nil { + in, out := &in.Server, &out.Server + *out = new(ServerConfiguration) + (*in).DeepCopyInto(*out) + } + if in.FeatureGates != nil { + in, out := &in.FeatureGates, &out.FeatureGates + *out = make(map[string]bool, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.SeedConfig != nil { + in, out := &in.SeedConfig, &out.SeedConfig + *out = new(SeedConfig) + (*in).DeepCopyInto(*out) + } + if in.SeedSelector != nil { + in, out := &in.SeedSelector, &out.SeedSelector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.Logging != nil { + in, out := &in.Logging, &out.Logging + *out = new(Logging) + (*in).DeepCopyInto(*out) + } + if in.SNI != nil { + in, out := &in.SNI, &out.SNI + *out = new(SNI) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GardenletConfiguration. 
+func (in *GardenletConfiguration) DeepCopy() *GardenletConfiguration { + if in == nil { + return nil + } + out := new(GardenletConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GardenletConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GardenletControllerConfiguration) DeepCopyInto(out *GardenletControllerConfiguration) { + *out = *in + if in.BackupBucket != nil { + in, out := &in.BackupBucket, &out.BackupBucket + *out = new(BackupBucketControllerConfiguration) + (*in).DeepCopyInto(*out) + } + if in.BackupEntry != nil { + in, out := &in.BackupEntry, &out.BackupEntry + *out = new(BackupEntryControllerConfiguration) + (*in).DeepCopyInto(*out) + } + if in.ControllerInstallation != nil { + in, out := &in.ControllerInstallation, &out.ControllerInstallation + *out = new(ControllerInstallationControllerConfiguration) + (*in).DeepCopyInto(*out) + } + if in.ControllerInstallationCare != nil { + in, out := &in.ControllerInstallationCare, &out.ControllerInstallationCare + *out = new(ControllerInstallationCareControllerConfiguration) + (*in).DeepCopyInto(*out) + } + if in.ControllerInstallationRequired != nil { + in, out := &in.ControllerInstallationRequired, &out.ControllerInstallationRequired + *out = new(ControllerInstallationRequiredControllerConfiguration) + (*in).DeepCopyInto(*out) + } + if in.Seed != nil { + in, out := &in.Seed, &out.Seed + *out = new(SeedControllerConfiguration) + (*in).DeepCopyInto(*out) + } + if in.Shoot != nil { + in, out := &in.Shoot, &out.Shoot + *out = new(ShootControllerConfiguration) + (*in).DeepCopyInto(*out) + } + if in.ShootCare != nil { + in, out := &in.ShootCare, &out.ShootCare + *out = new(ShootCareControllerConfiguration) + (*in).DeepCopyInto(*out) + } + if in.ShootStateSync != nil { + in, out := &in.ShootStateSync, &out.ShootStateSync + *out = new(ShootStateSyncControllerConfiguration) + (*in).DeepCopyInto(*out) + } + if in.ShootedSeedRegistration != nil { + in, out := &in.ShootedSeedRegistration, &out.ShootedSeedRegistration + *out = new(ShootedSeedRegistrationControllerConfiguration) + (*in).DeepCopyInto(*out) + } + if in.SeedAPIServerNetworkPolicy != nil { + in, out := &in.SeedAPIServerNetworkPolicy, &out.SeedAPIServerNetworkPolicy + *out = new(SeedAPIServerNetworkPolicyControllerConfiguration) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GardenletControllerConfiguration. +func (in *GardenletControllerConfiguration) DeepCopy() *GardenletControllerConfiguration { + if in == nil { + return nil + } + out := new(GardenletControllerConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPSServer) DeepCopyInto(out *HTTPSServer) { + *out = *in + out.Server = in.Server + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = new(TLSServer) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPSServer. 
+func (in *HTTPSServer) DeepCopy() *HTTPSServer { + if in == nil { + return nil + } + out := new(HTTPSServer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LeaderElectionConfiguration) DeepCopyInto(out *LeaderElectionConfiguration) { + *out = *in + in.LeaderElectionConfiguration.DeepCopyInto(&out.LeaderElectionConfiguration) + if in.LockObjectNamespace != nil { + in, out := &in.LockObjectNamespace, &out.LockObjectNamespace + *out = new(string) + **out = **in + } + if in.LockObjectName != nil { + in, out := &in.LockObjectName, &out.LockObjectName + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaderElectionConfiguration. +func (in *LeaderElectionConfiguration) DeepCopy() *LeaderElectionConfiguration { + if in == nil { + return nil + } + out := new(LeaderElectionConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Logging) DeepCopyInto(out *Logging) { + *out = *in + if in.FluentBit != nil { + in, out := &in.FluentBit, &out.FluentBit + *out = new(FluentBit) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Logging. +func (in *Logging) DeepCopy() *Logging { + if in == nil { + return nil + } + out := new(Logging) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourcesConfiguration) DeepCopyInto(out *ResourcesConfiguration) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = make(corev1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Reserved != nil { + in, out := &in.Reserved, &out.Reserved + *out = make(corev1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcesConfiguration. +func (in *ResourcesConfiguration) DeepCopy() *ResourcesConfiguration { + if in == nil { + return nil + } + out := new(ResourcesConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SNI) DeepCopyInto(out *SNI) { + *out = *in + if in.Ingress != nil { + in, out := &in.Ingress, &out.Ingress + *out = new(SNIIngress) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SNI. +func (in *SNI) DeepCopy() *SNI { + if in == nil { + return nil + } + out := new(SNI) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SNIIngress) DeepCopyInto(out *SNIIngress) { + *out = *in + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(string) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SNIIngress. +func (in *SNIIngress) DeepCopy() *SNIIngress { + if in == nil { + return nil + } + out := new(SNIIngress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SeedAPIServerNetworkPolicyControllerConfiguration) DeepCopyInto(out *SeedAPIServerNetworkPolicyControllerConfiguration) { + *out = *in + if in.ConcurrentSyncs != nil { + in, out := &in.ConcurrentSyncs, &out.ConcurrentSyncs + *out = new(int) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedAPIServerNetworkPolicyControllerConfiguration. +func (in *SeedAPIServerNetworkPolicyControllerConfiguration) DeepCopy() *SeedAPIServerNetworkPolicyControllerConfiguration { + if in == nil { + return nil + } + out := new(SeedAPIServerNetworkPolicyControllerConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SeedClientConnection) DeepCopyInto(out *SeedClientConnection) { + *out = *in + out.ClientConnectionConfiguration = in.ClientConnectionConfiguration + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedClientConnection. +func (in *SeedClientConnection) DeepCopy() *SeedClientConnection { + if in == nil { + return nil + } + out := new(SeedClientConnection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SeedConfig) DeepCopyInto(out *SeedConfig) { + *out = *in + in.SeedTemplate.DeepCopyInto(&out.SeedTemplate) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedConfig. +func (in *SeedConfig) DeepCopy() *SeedConfig { + if in == nil { + return nil + } + out := new(SeedConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SeedControllerConfiguration) DeepCopyInto(out *SeedControllerConfiguration) { + *out = *in + if in.ConcurrentSyncs != nil { + in, out := &in.ConcurrentSyncs, &out.ConcurrentSyncs + *out = new(int) + **out = **in + } + if in.SyncPeriod != nil { + in, out := &in.SyncPeriod, &out.SyncPeriod + *out = new(v1.Duration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedControllerConfiguration. +func (in *SeedControllerConfiguration) DeepCopy() *SeedControllerConfiguration { + if in == nil { + return nil + } + out := new(SeedControllerConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Server) DeepCopyInto(out *Server) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Server. +func (in *Server) DeepCopy() *Server { + if in == nil { + return nil + } + out := new(Server) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerConfiguration) DeepCopyInto(out *ServerConfiguration) { + *out = *in + in.HTTPS.DeepCopyInto(&out.HTTPS) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerConfiguration. +func (in *ServerConfiguration) DeepCopy() *ServerConfiguration { + if in == nil { + return nil + } + out := new(ServerConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ShootCareControllerConfiguration) DeepCopyInto(out *ShootCareControllerConfiguration) { + *out = *in + if in.ConcurrentSyncs != nil { + in, out := &in.ConcurrentSyncs, &out.ConcurrentSyncs + *out = new(int) + **out = **in + } + if in.SyncPeriod != nil { + in, out := &in.SyncPeriod, &out.SyncPeriod + *out = new(v1.Duration) + **out = **in + } + if in.StaleExtensionHealthChecks != nil { + in, out := &in.StaleExtensionHealthChecks, &out.StaleExtensionHealthChecks + *out = new(StaleExtensionHealthChecks) + (*in).DeepCopyInto(*out) + } + if in.ConditionThresholds != nil { + in, out := &in.ConditionThresholds, &out.ConditionThresholds + *out = make([]ConditionThreshold, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShootCareControllerConfiguration. +func (in *ShootCareControllerConfiguration) DeepCopy() *ShootCareControllerConfiguration { + if in == nil { + return nil + } + out := new(ShootCareControllerConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ShootClientConnection) DeepCopyInto(out *ShootClientConnection) { + *out = *in + out.ClientConnectionConfiguration = in.ClientConnectionConfiguration + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShootClientConnection. +func (in *ShootClientConnection) DeepCopy() *ShootClientConnection { + if in == nil { + return nil + } + out := new(ShootClientConnection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ShootControllerConfiguration) DeepCopyInto(out *ShootControllerConfiguration) { + *out = *in + if in.ConcurrentSyncs != nil { + in, out := &in.ConcurrentSyncs, &out.ConcurrentSyncs + *out = new(int) + **out = **in + } + if in.ProgressReportPeriod != nil { + in, out := &in.ProgressReportPeriod, &out.ProgressReportPeriod + *out = new(v1.Duration) + **out = **in + } + if in.ReconcileInMaintenanceOnly != nil { + in, out := &in.ReconcileInMaintenanceOnly, &out.ReconcileInMaintenanceOnly + *out = new(bool) + **out = **in + } + if in.RespectSyncPeriodOverwrite != nil { + in, out := &in.RespectSyncPeriodOverwrite, &out.RespectSyncPeriodOverwrite + *out = new(bool) + **out = **in + } + if in.RetryDuration != nil { + in, out := &in.RetryDuration, &out.RetryDuration + *out = new(v1.Duration) + **out = **in + } + if in.SyncPeriod != nil { + in, out := &in.SyncPeriod, &out.SyncPeriod + *out = new(v1.Duration) + **out = **in + } + if in.DNSEntryTTLSeconds != nil { + in, out := &in.DNSEntryTTLSeconds, &out.DNSEntryTTLSeconds + *out = new(int64) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShootControllerConfiguration. +func (in *ShootControllerConfiguration) DeepCopy() *ShootControllerConfiguration { + if in == nil { + return nil + } + out := new(ShootControllerConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ShootStateSyncControllerConfiguration) DeepCopyInto(out *ShootStateSyncControllerConfiguration) { + *out = *in + if in.ConcurrentSyncs != nil { + in, out := &in.ConcurrentSyncs, &out.ConcurrentSyncs + *out = new(int) + **out = **in + } + if in.SyncPeriod != nil { + in, out := &in.SyncPeriod, &out.SyncPeriod + *out = new(v1.Duration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShootStateSyncControllerConfiguration. +func (in *ShootStateSyncControllerConfiguration) DeepCopy() *ShootStateSyncControllerConfiguration { + if in == nil { + return nil + } + out := new(ShootStateSyncControllerConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ShootedSeedRegistrationControllerConfiguration) DeepCopyInto(out *ShootedSeedRegistrationControllerConfiguration) { + *out = *in + if in.SyncJitterPeriod != nil { + in, out := &in.SyncJitterPeriod, &out.SyncJitterPeriod + *out = new(v1.Duration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShootedSeedRegistrationControllerConfiguration. +func (in *ShootedSeedRegistrationControllerConfiguration) DeepCopy() *ShootedSeedRegistrationControllerConfiguration { + if in == nil { + return nil + } + out := new(ShootedSeedRegistrationControllerConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StaleExtensionHealthChecks) DeepCopyInto(out *StaleExtensionHealthChecks) { + *out = *in + if in.Threshold != nil { + in, out := &in.Threshold, &out.Threshold + *out = new(v1.Duration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaleExtensionHealthChecks. 
+func (in *StaleExtensionHealthChecks) DeepCopy() *StaleExtensionHealthChecks { + if in == nil { + return nil + } + out := new(StaleExtensionHealthChecks) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSServer) DeepCopyInto(out *TLSServer) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSServer. +func (in *TLSServer) DeepCopy() *TLSServer { + if in == nil { + return nil + } + out := new(TLSServer) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/v1alpha1/zz_generated.defaults.go b/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/v1alpha1/zz_generated.defaults.go new file mode 100644 index 000000000..b76c47344 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/v1alpha1/zz_generated.defaults.go @@ -0,0 +1,93 @@ +// +build !ignore_autogenerated + +/* +Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by defaulter-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. 
+func RegisterDefaults(scheme *runtime.Scheme) error { + scheme.AddTypeDefaultingFunc(&GardenletConfiguration{}, func(obj interface{}) { SetObjectDefaults_GardenletConfiguration(obj.(*GardenletConfiguration)) }) + return nil +} + +func SetObjectDefaults_GardenletConfiguration(in *GardenletConfiguration) { + SetDefaults_GardenletConfiguration(in) + if in.GardenClientConnection != nil { + SetDefaults_ClientConnectionConfiguration(&in.GardenClientConnection.ClientConnectionConfiguration) + } + if in.SeedClientConnection != nil { + SetDefaults_ClientConnectionConfiguration(&in.SeedClientConnection.ClientConnectionConfiguration) + } + if in.ShootClientConnection != nil { + SetDefaults_ClientConnectionConfiguration(&in.ShootClientConnection.ClientConnectionConfiguration) + } + if in.Controllers != nil { + if in.Controllers.BackupBucket != nil { + SetDefaults_BackupBucketControllerConfiguration(in.Controllers.BackupBucket) + } + if in.Controllers.BackupEntry != nil { + SetDefaults_BackupEntryControllerConfiguration(in.Controllers.BackupEntry) + } + if in.Controllers.ControllerInstallation != nil { + SetDefaults_ControllerInstallationControllerConfiguration(in.Controllers.ControllerInstallation) + } + if in.Controllers.ControllerInstallationCare != nil { + SetDefaults_ControllerInstallationCareControllerConfiguration(in.Controllers.ControllerInstallationCare) + } + if in.Controllers.ControllerInstallationRequired != nil { + SetDefaults_ControllerInstallationRequiredControllerConfiguration(in.Controllers.ControllerInstallationRequired) + } + if in.Controllers.Seed != nil { + SetDefaults_SeedControllerConfiguration(in.Controllers.Seed) + } + if in.Controllers.Shoot != nil { + SetDefaults_ShootControllerConfiguration(in.Controllers.Shoot) + } + if in.Controllers.ShootCare != nil { + SetDefaults_ShootCareControllerConfiguration(in.Controllers.ShootCare) + if in.Controllers.ShootCare.StaleExtensionHealthChecks != nil { + SetDefaults_StaleExtensionHealthChecks(in.Controllers.ShootCare.StaleExtensionHealthChecks) + } + } + if in.Controllers.ShootStateSync != nil { + SetDefaults_ShootStateSyncControllerConfiguration(in.Controllers.ShootStateSync) + } + if in.Controllers.ShootedSeedRegistration != nil { + SetDefaults_ShootedSeedRegistrationControllerConfiguration(in.Controllers.ShootedSeedRegistration) + } + if in.Controllers.SeedAPIServerNetworkPolicy != nil { + SetDefaults_SeedAPIServerNetworkPolicyControllerConfiguration(in.Controllers.SeedAPIServerNetworkPolicy) + } + } + if in.LeaderElection != nil { + SetDefaults_LeaderElectionConfiguration(in.LeaderElection) + } + if in.SNI != nil { + SetDefaults_SNI(in.SNI) + if in.SNI.Ingress != nil { + SetDefaults_SNIIngress(in.SNI.Ingress) + } + } +} diff --git a/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/zz_generated.deepcopy.go b/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/zz_generated.deepcopy.go new file mode 100644 index 000000000..e2f1ad97e --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/zz_generated.deepcopy.go @@ -0,0 +1,852 @@ +// +build !ignore_autogenerated + +/* +Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package config + +import ( + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + klog "k8s.io/klog" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupBucketControllerConfiguration) DeepCopyInto(out *BackupBucketControllerConfiguration) { + *out = *in + if in.ConcurrentSyncs != nil { + in, out := &in.ConcurrentSyncs, &out.ConcurrentSyncs + *out = new(int) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupBucketControllerConfiguration. +func (in *BackupBucketControllerConfiguration) DeepCopy() *BackupBucketControllerConfiguration { + if in == nil { + return nil + } + out := new(BackupBucketControllerConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupEntryControllerConfiguration) DeepCopyInto(out *BackupEntryControllerConfiguration) { + *out = *in + if in.ConcurrentSyncs != nil { + in, out := &in.ConcurrentSyncs, &out.ConcurrentSyncs + *out = new(int) + **out = **in + } + if in.DeletionGracePeriodHours != nil { + in, out := &in.DeletionGracePeriodHours, &out.DeletionGracePeriodHours + *out = new(int) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupEntryControllerConfiguration. +func (in *BackupEntryControllerConfiguration) DeepCopy() *BackupEntryControllerConfiguration { + if in == nil { + return nil + } + out := new(BackupEntryControllerConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionThreshold) DeepCopyInto(out *ConditionThreshold) { + *out = *in + out.Duration = in.Duration + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionThreshold. +func (in *ConditionThreshold) DeepCopy() *ConditionThreshold { + if in == nil { + return nil + } + out := new(ConditionThreshold) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerInstallationCareControllerConfiguration) DeepCopyInto(out *ControllerInstallationCareControllerConfiguration) { + *out = *in + if in.ConcurrentSyncs != nil { + in, out := &in.ConcurrentSyncs, &out.ConcurrentSyncs + *out = new(int) + **out = **in + } + if in.SyncPeriod != nil { + in, out := &in.SyncPeriod, &out.SyncPeriod + *out = new(v1.Duration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerInstallationCareControllerConfiguration. 
+func (in *ControllerInstallationCareControllerConfiguration) DeepCopy() *ControllerInstallationCareControllerConfiguration { + if in == nil { + return nil + } + out := new(ControllerInstallationCareControllerConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerInstallationControllerConfiguration) DeepCopyInto(out *ControllerInstallationControllerConfiguration) { + *out = *in + if in.ConcurrentSyncs != nil { + in, out := &in.ConcurrentSyncs, &out.ConcurrentSyncs + *out = new(int) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerInstallationControllerConfiguration. +func (in *ControllerInstallationControllerConfiguration) DeepCopy() *ControllerInstallationControllerConfiguration { + if in == nil { + return nil + } + out := new(ControllerInstallationControllerConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerInstallationRequiredControllerConfiguration) DeepCopyInto(out *ControllerInstallationRequiredControllerConfiguration) { + *out = *in + if in.ConcurrentSyncs != nil { + in, out := &in.ConcurrentSyncs, &out.ConcurrentSyncs + *out = new(int) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerInstallationRequiredControllerConfiguration. +func (in *ControllerInstallationRequiredControllerConfiguration) DeepCopy() *ControllerInstallationRequiredControllerConfiguration { + if in == nil { + return nil + } + out := new(ControllerInstallationRequiredControllerConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FluentBit) DeepCopyInto(out *FluentBit) { + *out = *in + if in.ServiceSection != nil { + in, out := &in.ServiceSection, &out.ServiceSection + *out = new(string) + **out = **in + } + if in.InputSection != nil { + in, out := &in.InputSection, &out.InputSection + *out = new(string) + **out = **in + } + if in.OutputSection != nil { + in, out := &in.OutputSection, &out.OutputSection + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FluentBit. +func (in *FluentBit) DeepCopy() *FluentBit { + if in == nil { + return nil + } + out := new(FluentBit) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GardenClientConnection) DeepCopyInto(out *GardenClientConnection) { + *out = *in + out.ClientConnectionConfiguration = in.ClientConnectionConfiguration + if in.GardenClusterAddress != nil { + in, out := &in.GardenClusterAddress, &out.GardenClusterAddress + *out = new(string) + **out = **in + } + if in.GardenClusterCACert != nil { + in, out := &in.GardenClusterCACert, &out.GardenClusterCACert + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.BootstrapKubeconfig != nil { + in, out := &in.BootstrapKubeconfig, &out.BootstrapKubeconfig + *out = new(corev1.SecretReference) + **out = **in + } + if in.KubeconfigSecret != nil { + in, out := &in.KubeconfigSecret, &out.KubeconfigSecret + *out = new(corev1.SecretReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GardenClientConnection. +func (in *GardenClientConnection) DeepCopy() *GardenClientConnection { + if in == nil { + return nil + } + out := new(GardenClientConnection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GardenletConfiguration) DeepCopyInto(out *GardenletConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.GardenClientConnection != nil { + in, out := &in.GardenClientConnection, &out.GardenClientConnection + *out = new(GardenClientConnection) + (*in).DeepCopyInto(*out) + } + if in.SeedClientConnection != nil { + in, out := &in.SeedClientConnection, &out.SeedClientConnection + *out = new(SeedClientConnection) + **out = **in + } + if in.ShootClientConnection != nil { + in, out := &in.ShootClientConnection, &out.ShootClientConnection + *out = new(ShootClientConnection) + **out = **in + } + if in.Controllers != nil { + in, out := &in.Controllers, &out.Controllers + *out = new(GardenletControllerConfiguration) + (*in).DeepCopyInto(*out) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(ResourcesConfiguration) + (*in).DeepCopyInto(*out) + } + if in.LeaderElection != nil { + in, out := &in.LeaderElection, &out.LeaderElection + *out = new(LeaderElectionConfiguration) + (*in).DeepCopyInto(*out) + } + if in.LogLevel != nil { + in, out := &in.LogLevel, &out.LogLevel + *out = new(string) + **out = **in + } + if in.KubernetesLogLevel != nil { + in, out := &in.KubernetesLogLevel, &out.KubernetesLogLevel + *out = new(klog.Level) + **out = **in + } + if in.Server != nil { + in, out := &in.Server, &out.Server + *out = new(ServerConfiguration) + (*in).DeepCopyInto(*out) + } + if in.FeatureGates != nil { + in, out := &in.FeatureGates, &out.FeatureGates + *out = make(map[string]bool, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.SeedConfig != nil { + in, out := &in.SeedConfig, &out.SeedConfig + *out = new(SeedConfig) + (*in).DeepCopyInto(*out) + } + if in.SeedSelector != nil { + in, out := &in.SeedSelector, &out.SeedSelector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.Logging != nil { + in, out := &in.Logging, &out.Logging + *out = new(Logging) + (*in).DeepCopyInto(*out) + } + if in.SNI != nil { + in, out := &in.SNI, &out.SNI + *out = new(SNI) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GardenletConfiguration. 
+func (in *GardenletConfiguration) DeepCopy() *GardenletConfiguration { + if in == nil { + return nil + } + out := new(GardenletConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GardenletConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GardenletControllerConfiguration) DeepCopyInto(out *GardenletControllerConfiguration) { + *out = *in + if in.BackupBucket != nil { + in, out := &in.BackupBucket, &out.BackupBucket + *out = new(BackupBucketControllerConfiguration) + (*in).DeepCopyInto(*out) + } + if in.BackupEntry != nil { + in, out := &in.BackupEntry, &out.BackupEntry + *out = new(BackupEntryControllerConfiguration) + (*in).DeepCopyInto(*out) + } + if in.ControllerInstallation != nil { + in, out := &in.ControllerInstallation, &out.ControllerInstallation + *out = new(ControllerInstallationControllerConfiguration) + (*in).DeepCopyInto(*out) + } + if in.ControllerInstallationCare != nil { + in, out := &in.ControllerInstallationCare, &out.ControllerInstallationCare + *out = new(ControllerInstallationCareControllerConfiguration) + (*in).DeepCopyInto(*out) + } + if in.ControllerInstallationRequired != nil { + in, out := &in.ControllerInstallationRequired, &out.ControllerInstallationRequired + *out = new(ControllerInstallationRequiredControllerConfiguration) + (*in).DeepCopyInto(*out) + } + if in.Seed != nil { + in, out := &in.Seed, &out.Seed + *out = new(SeedControllerConfiguration) + (*in).DeepCopyInto(*out) + } + if in.Shoot != nil { + in, out := &in.Shoot, &out.Shoot + *out = new(ShootControllerConfiguration) + (*in).DeepCopyInto(*out) + } + if in.ShootCare != nil { + in, out := &in.ShootCare, &out.ShootCare + *out = new(ShootCareControllerConfiguration) + (*in).DeepCopyInto(*out) + } + if in.ShootStateSync != nil { + in, out := &in.ShootStateSync, &out.ShootStateSync + *out = new(ShootStateSyncControllerConfiguration) + (*in).DeepCopyInto(*out) + } + if in.ShootedSeedRegistration != nil { + in, out := &in.ShootedSeedRegistration, &out.ShootedSeedRegistration + *out = new(ShootedSeedRegistrationControllerConfiguration) + (*in).DeepCopyInto(*out) + } + if in.SeedAPIServerNetworkPolicy != nil { + in, out := &in.SeedAPIServerNetworkPolicy, &out.SeedAPIServerNetworkPolicy + *out = new(SeedAPIServerNetworkPolicyControllerConfiguration) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GardenletControllerConfiguration. +func (in *GardenletControllerConfiguration) DeepCopy() *GardenletControllerConfiguration { + if in == nil { + return nil + } + out := new(GardenletControllerConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPSServer) DeepCopyInto(out *HTTPSServer) { + *out = *in + out.Server = in.Server + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = new(TLSServer) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPSServer. 
+func (in *HTTPSServer) DeepCopy() *HTTPSServer { + if in == nil { + return nil + } + out := new(HTTPSServer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LeaderElectionConfiguration) DeepCopyInto(out *LeaderElectionConfiguration) { + *out = *in + out.LeaderElectionConfiguration = in.LeaderElectionConfiguration + if in.LockObjectNamespace != nil { + in, out := &in.LockObjectNamespace, &out.LockObjectNamespace + *out = new(string) + **out = **in + } + if in.LockObjectName != nil { + in, out := &in.LockObjectName, &out.LockObjectName + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaderElectionConfiguration. +func (in *LeaderElectionConfiguration) DeepCopy() *LeaderElectionConfiguration { + if in == nil { + return nil + } + out := new(LeaderElectionConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Logging) DeepCopyInto(out *Logging) { + *out = *in + if in.FluentBit != nil { + in, out := &in.FluentBit, &out.FluentBit + *out = new(FluentBit) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Logging. +func (in *Logging) DeepCopy() *Logging { + if in == nil { + return nil + } + out := new(Logging) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourcesConfiguration) DeepCopyInto(out *ResourcesConfiguration) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = make(corev1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Reserved != nil { + in, out := &in.Reserved, &out.Reserved + *out = make(corev1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcesConfiguration. +func (in *ResourcesConfiguration) DeepCopy() *ResourcesConfiguration { + if in == nil { + return nil + } + out := new(ResourcesConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SNI) DeepCopyInto(out *SNI) { + *out = *in + if in.Ingress != nil { + in, out := &in.Ingress, &out.Ingress + *out = new(SNIIngress) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SNI. +func (in *SNI) DeepCopy() *SNI { + if in == nil { + return nil + } + out := new(SNI) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SNIIngress) DeepCopyInto(out *SNIIngress) { + *out = *in + if in.ServiceName != nil { + in, out := &in.ServiceName, &out.ServiceName + *out = new(string) + **out = **in + } + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(string) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SNIIngress. +func (in *SNIIngress) DeepCopy() *SNIIngress { + if in == nil { + return nil + } + out := new(SNIIngress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SeedAPIServerNetworkPolicyControllerConfiguration) DeepCopyInto(out *SeedAPIServerNetworkPolicyControllerConfiguration) { + *out = *in + if in.ConcurrentSyncs != nil { + in, out := &in.ConcurrentSyncs, &out.ConcurrentSyncs + *out = new(int) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedAPIServerNetworkPolicyControllerConfiguration. +func (in *SeedAPIServerNetworkPolicyControllerConfiguration) DeepCopy() *SeedAPIServerNetworkPolicyControllerConfiguration { + if in == nil { + return nil + } + out := new(SeedAPIServerNetworkPolicyControllerConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SeedClientConnection) DeepCopyInto(out *SeedClientConnection) { + *out = *in + out.ClientConnectionConfiguration = in.ClientConnectionConfiguration + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedClientConnection. +func (in *SeedClientConnection) DeepCopy() *SeedClientConnection { + if in == nil { + return nil + } + out := new(SeedClientConnection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SeedConfig) DeepCopyInto(out *SeedConfig) { + *out = *in + in.SeedTemplate.DeepCopyInto(&out.SeedTemplate) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedConfig. +func (in *SeedConfig) DeepCopy() *SeedConfig { + if in == nil { + return nil + } + out := new(SeedConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SeedControllerConfiguration) DeepCopyInto(out *SeedControllerConfiguration) { + *out = *in + if in.ConcurrentSyncs != nil { + in, out := &in.ConcurrentSyncs, &out.ConcurrentSyncs + *out = new(int) + **out = **in + } + if in.SyncPeriod != nil { + in, out := &in.SyncPeriod, &out.SyncPeriod + *out = new(v1.Duration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedControllerConfiguration. +func (in *SeedControllerConfiguration) DeepCopy() *SeedControllerConfiguration { + if in == nil { + return nil + } + out := new(SeedControllerConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Server) DeepCopyInto(out *Server) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Server. +func (in *Server) DeepCopy() *Server { + if in == nil { + return nil + } + out := new(Server) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerConfiguration) DeepCopyInto(out *ServerConfiguration) { + *out = *in + in.HTTPS.DeepCopyInto(&out.HTTPS) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerConfiguration. +func (in *ServerConfiguration) DeepCopy() *ServerConfiguration { + if in == nil { + return nil + } + out := new(ServerConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ShootCareControllerConfiguration) DeepCopyInto(out *ShootCareControllerConfiguration) { + *out = *in + if in.ConcurrentSyncs != nil { + in, out := &in.ConcurrentSyncs, &out.ConcurrentSyncs + *out = new(int) + **out = **in + } + if in.SyncPeriod != nil { + in, out := &in.SyncPeriod, &out.SyncPeriod + *out = new(v1.Duration) + **out = **in + } + if in.StaleExtensionHealthChecks != nil { + in, out := &in.StaleExtensionHealthChecks, &out.StaleExtensionHealthChecks + *out = new(StaleExtensionHealthChecks) + (*in).DeepCopyInto(*out) + } + if in.ConditionThresholds != nil { + in, out := &in.ConditionThresholds, &out.ConditionThresholds + *out = make([]ConditionThreshold, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShootCareControllerConfiguration. +func (in *ShootCareControllerConfiguration) DeepCopy() *ShootCareControllerConfiguration { + if in == nil { + return nil + } + out := new(ShootCareControllerConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ShootClientConnection) DeepCopyInto(out *ShootClientConnection) { + *out = *in + out.ClientConnectionConfiguration = in.ClientConnectionConfiguration + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShootClientConnection. +func (in *ShootClientConnection) DeepCopy() *ShootClientConnection { + if in == nil { + return nil + } + out := new(ShootClientConnection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ShootControllerConfiguration) DeepCopyInto(out *ShootControllerConfiguration) { + *out = *in + if in.ConcurrentSyncs != nil { + in, out := &in.ConcurrentSyncs, &out.ConcurrentSyncs + *out = new(int) + **out = **in + } + if in.ProgressReportPeriod != nil { + in, out := &in.ProgressReportPeriod, &out.ProgressReportPeriod + *out = new(v1.Duration) + **out = **in + } + if in.ReconcileInMaintenanceOnly != nil { + in, out := &in.ReconcileInMaintenanceOnly, &out.ReconcileInMaintenanceOnly + *out = new(bool) + **out = **in + } + if in.RespectSyncPeriodOverwrite != nil { + in, out := &in.RespectSyncPeriodOverwrite, &out.RespectSyncPeriodOverwrite + *out = new(bool) + **out = **in + } + if in.RetryDuration != nil { + in, out := &in.RetryDuration, &out.RetryDuration + *out = new(v1.Duration) + **out = **in + } + if in.SyncPeriod != nil { + in, out := &in.SyncPeriod, &out.SyncPeriod + *out = new(v1.Duration) + **out = **in + } + if in.DNSEntryTTLSeconds != nil { + in, out := &in.DNSEntryTTLSeconds, &out.DNSEntryTTLSeconds + *out = new(int64) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShootControllerConfiguration. +func (in *ShootControllerConfiguration) DeepCopy() *ShootControllerConfiguration { + if in == nil { + return nil + } + out := new(ShootControllerConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ShootStateSyncControllerConfiguration) DeepCopyInto(out *ShootStateSyncControllerConfiguration) { + *out = *in + if in.ConcurrentSyncs != nil { + in, out := &in.ConcurrentSyncs, &out.ConcurrentSyncs + *out = new(int) + **out = **in + } + if in.SyncPeriod != nil { + in, out := &in.SyncPeriod, &out.SyncPeriod + *out = new(v1.Duration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShootStateSyncControllerConfiguration. +func (in *ShootStateSyncControllerConfiguration) DeepCopy() *ShootStateSyncControllerConfiguration { + if in == nil { + return nil + } + out := new(ShootStateSyncControllerConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ShootedSeedRegistrationControllerConfiguration) DeepCopyInto(out *ShootedSeedRegistrationControllerConfiguration) { + *out = *in + if in.SyncJitterPeriod != nil { + in, out := &in.SyncJitterPeriod, &out.SyncJitterPeriod + *out = new(v1.Duration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShootedSeedRegistrationControllerConfiguration. +func (in *ShootedSeedRegistrationControllerConfiguration) DeepCopy() *ShootedSeedRegistrationControllerConfiguration { + if in == nil { + return nil + } + out := new(ShootedSeedRegistrationControllerConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StaleExtensionHealthChecks) DeepCopyInto(out *StaleExtensionHealthChecks) { + *out = *in + if in.Threshold != nil { + in, out := &in.Threshold, &out.Threshold + *out = new(v1.Duration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaleExtensionHealthChecks. 
+func (in *StaleExtensionHealthChecks) DeepCopy() *StaleExtensionHealthChecks { + if in == nil { + return nil + } + out := new(StaleExtensionHealthChecks) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSServer) DeepCopyInto(out *TLSServer) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSServer. +func (in *TLSServer) DeepCopy() *TLSServer { + if in == nil { + return nil + } + out := new(TLSServer) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/gardener/gardener/pkg/logger/logger.go b/vendor/github.com/gardener/gardener/pkg/logger/logger.go index 00cdd4ecf..0dd8d051f 100644 --- a/vendor/github.com/gardener/gardener/pkg/logger/logger.go +++ b/vendor/github.com/gardener/gardener/pkg/logger/logger.go @@ -20,7 +20,10 @@ import ( "io/ioutil" "os" + "github.com/gardener/gardener/pkg/utils" + "github.com/sirupsen/logrus" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" ) // Logger is the standard logger for the Gardener which is used for all messages which are not Shoot @@ -79,6 +82,17 @@ func NewShootLogger(logger *logrus.Logger, shoot, project string) *logrus.Entry // NewFieldLogger extends an existing logrus logger and adds the provided additional field. // Example output: time="2017-06-08T13:00:49+02:00" level=info msg="something" <fieldKey>=<fieldValue>. -func NewFieldLogger(logger *logrus.Logger, fieldKey, fieldValue string) *logrus.Entry { +func NewFieldLogger(logger logrus.FieldLogger, fieldKey, fieldValue string) *logrus.Entry { return logger.WithField(fieldKey, fieldValue) } + +// IDFieldName is the name of the id field for a logger. +const IDFieldName = "process_id" + +// NewIDLogger extends an existing logrus logger with a randomly generated id field. +// Example output: time="2017-06-08T13:00:49+02:00" level=info msg="something" id=123abcde. +func NewIDLogger(logger logrus.FieldLogger) logrus.FieldLogger { + id, err := utils.GenerateRandomString(8) + utilruntime.Must(err) + return NewFieldLogger(logger, IDFieldName, id) +} diff --git a/vendor/github.com/gardener/gardener/pkg/utils/checksums.go b/vendor/github.com/gardener/gardener/pkg/utils/checksums.go new file mode 100644 index 000000000..c53e83bcb --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/utils/checksums.go @@ -0,0 +1,48 @@ +// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package utils + +import ( + "encoding/json" + "sort" +) + +// ComputeSecretCheckSum computes the sha256 checksum of secret data.
+func ComputeSecretCheckSum(data map[string][]byte) string { + var ( + hash string + keys []string + ) + + for k := range data { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, k := range keys { + hash += ComputeSHA256Hex(data[k]) + } + + return ComputeSHA256Hex([]byte(hash)) +} + +// ComputeChecksum computes a SHA256 checksum for the give map. +func ComputeChecksum(data interface{}) string { + jsonString, err := json.Marshal(data) + if err != nil { + return "" + } + return ComputeSHA256Hex(jsonString) +} diff --git a/vendor/github.com/gardener/gardener/pkg/utils/context/context.go b/vendor/github.com/gardener/gardener/pkg/utils/context/context.go index 21e656154..0d339bdc5 100644 --- a/vendor/github.com/gardener/gardener/pkg/utils/context/context.go +++ b/vendor/github.com/gardener/gardener/pkg/utils/context/context.go @@ -19,6 +19,17 @@ import ( "time" ) +// FromStopChannel creates a new context from a given stop channel. +func FromStopChannel(stopCh <-chan struct{}) context.Context { + ctx, cancel := context.WithCancel(context.Background()) + go func() { + defer cancel() + <-stopCh + }() + + return ctx +} + type ops struct{} // WithTimeout returns the context with the given timeout and a CancelFunc to cleanup its resources. diff --git a/vendor/github.com/gardener/gardener/pkg/utils/encoding.go b/vendor/github.com/gardener/gardener/pkg/utils/encoding.go index abd5b0475..13cf9d9fd 100644 --- a/vendor/github.com/gardener/gardener/pkg/utils/encoding.go +++ b/vendor/github.com/gardener/gardener/pkg/utils/encoding.go @@ -63,6 +63,24 @@ func EncodePrivateKeyInPKCS8(key *rsa.PrivateKey) ([]byte, error) { }), nil } +// DecodeRSAPrivateKeyFromPKCS8 takes a byte slice, decodes it from the PKCS8 format, tries to convert it +// to an rsa.PrivateKey object, and returns it. In case an error occurs, it returns the error. +func DecodeRSAPrivateKeyFromPKCS8(bytes []byte) (*rsa.PrivateKey, error) { + block, _ := pem.Decode(bytes) + if block == nil || block.Type != "RSA PRIVATE KEY" { + return nil, errors.New("could not decode the PEM-encoded RSA private key") + } + key, err := x509.ParsePKCS8PrivateKey(block.Bytes) + if err != nil { + return nil, err + } + rsaKey, ok := key.(*rsa.PrivateKey) + if !ok { + return nil, errors.New("the decoded key is not an RSA private key") + } + return rsaKey, nil +} + // DecodePrivateKey takes a byte slice, decodes it from the PEM format, converts it to an rsa.PrivateKey // object, and returns it. In case an error occurs, it returns the error. 
func DecodePrivateKey(bytes []byte) (*rsa.PrivateKey, error) { diff --git a/vendor/github.com/gardener/gardener/pkg/utils/errors/multierror.go b/vendor/github.com/gardener/gardener/pkg/utils/errors/multierror.go index c0739b277..69e94553a 100644 --- a/vendor/github.com/gardener/gardener/pkg/utils/errors/multierror.go +++ b/vendor/github.com/gardener/gardener/pkg/utils/errors/multierror.go @@ -16,6 +16,7 @@ package errors import ( "fmt" + "github.com/hashicorp/go-multierror" ) diff --git a/vendor/github.com/gardener/gardener/pkg/utils/imagevector/imagevector.go b/vendor/github.com/gardener/gardener/pkg/utils/imagevector/imagevector.go index 6dcfa3392..597fee2de 100644 --- a/vendor/github.com/gardener/gardener/pkg/utils/imagevector/imagevector.go +++ b/vendor/github.com/gardener/gardener/pkg/utils/imagevector/imagevector.go @@ -199,7 +199,7 @@ func TargetVersion(version string) FindOptionFunc { } } -var r = regexp.MustCompile(`^(v?[0-9]+|=)`) +var r = regexp.MustCompile(`^(v?[0-9]+\.[0-9]+\.[0-9]+|=)`) func checkConstraint(constraint, version *string) (score int, ok bool, err error) { if constraint == nil || version == nil { @@ -326,3 +326,12 @@ func (i *Image) String() string { return i.Repository + delimiter + *i.Tag } + +// ImageMapToValues transforms the given image name to image mapping into chart Values. +func ImageMapToValues(m map[string]*Image) map[string]interface{} { + out := make(map[string]interface{}, len(m)) + for k, v := range m { + out[k] = v.String() + } + return out +} diff --git a/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/health/and.go b/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/health/and.go new file mode 100644 index 000000000..b41efa23f --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/health/and.go @@ -0,0 +1,35 @@ +// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package health + +import ( + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// Func is a type for a function that checks the health of a runtime.Object. 
+type Func func(client.Object) error + +// And combines multiple health check funcs to a single func, checking all funcs sequentially and return the first +// error that occurs or nil if no error occurs. +func And(fns ...Func) Func { + return func(o client.Object) error { + for _, fn := range fns { + if err := fn(o); err != nil { + return err + } + } + return nil + } +} diff --git a/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/health/health.go b/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/health/health.go index bb7d80e70..53bbd6fb8 100644 --- a/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/health/health.go +++ b/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/health/health.go @@ -15,22 +15,29 @@ package health import ( + "context" "fmt" "net/http" "time" - gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1" - v1beta1constants "github.com/gardener/gardener/pkg/apis/core/v1beta1/constants" - gardencorev1beta1helper "github.com/gardener/gardener/pkg/apis/core/v1beta1/helper" - extensionsv1alpha1 "github.com/gardener/gardener/pkg/apis/extensions/v1alpha1" - "github.com/gardener/gardener/pkg/utils" - druidv1alpha1 "github.com/gardener/etcd-druid/api/v1alpha1" + resourcesv1alpha1 "github.com/gardener/gardener-resource-manager/pkg/apis/resources/v1alpha1" + "github.com/sirupsen/logrus" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" apiequality "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client" + + gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1" + v1beta1constants "github.com/gardener/gardener/pkg/apis/core/v1beta1/constants" + gardencorev1beta1helper "github.com/gardener/gardener/pkg/apis/core/v1beta1/helper" + extensionsv1alpha1 "github.com/gardener/gardener/pkg/apis/extensions/v1alpha1" + "github.com/gardener/gardener/pkg/client/kubernetes" + "github.com/gardener/gardener/pkg/utils" + "github.com/gardener/gardener/pkg/utils/retry" ) func requiredConditionMissing(conditionType string) error { @@ -241,13 +248,28 @@ var ( // CheckSeed checks if the Seed is up-to-date and if its extensions have been successfully bootstrapped. func CheckSeed(seed *gardencorev1beta1.Seed, identity *gardencorev1beta1.Gardener) error { - if seed.Status.ObservedGeneration < seed.Generation { - return fmt.Errorf("observed generation outdated (%d/%d)", seed.Status.ObservedGeneration, seed.Generation) - } if !apiequality.Semantic.DeepEqual(seed.Status.Gardener, identity) { return fmt.Errorf("observing Gardener version not up to date (%v/%v)", seed.Status.Gardener, identity) } + return checkSeed(seed, identity) +} + +// CheckSeedForMigration checks if the Seed is up-to-date (comparing only the versions) and if its extensions have been successfully bootstrapped. +func CheckSeedForMigration(seed *gardencorev1beta1.Seed, identity *gardencorev1beta1.Gardener) error { + if seed.Status.Gardener.Version != identity.Version { + return fmt.Errorf("observing Gardener version not up to date (%s/%s)", seed.Status.Gardener.Version, identity.Version) + } + + return checkSeed(seed, identity) +} + +// checkSeed checks if the seed.Status.ObservedGeneration is not outdated and if its extensions have been successfully bootstrapped.
+func checkSeed(seed *gardencorev1beta1.Seed, identity *gardencorev1beta1.Gardener) error { + if seed.Status.ObservedGeneration < seed.Generation { + return fmt.Errorf("observed generation outdated (%d/%d)", seed.Status.ObservedGeneration, seed.Generation) + } + for _, trueConditionType := range trueSeedConditionTypes { conditionType := string(trueConditionType) condition := gardencorev1beta1helper.GetCondition(seed.Status.Conditions, trueConditionType) @@ -268,61 +290,64 @@ func CheckSeed(seed *gardencorev1beta1.Seed, identity *gardencorev1beta1.Gardene // * No gardener.cloud/operation is set // * No lastError is in the status // * A last operation is state succeeded is present -func CheckExtensionObject(obj extensionsv1alpha1.Object) error { - status := obj.GetExtensionStatus() - if status.GetObservedGeneration() != obj.GetGeneration() { - return fmt.Errorf("observed generation outdated (%d/%d)", status.GetObservedGeneration(), obj.GetGeneration()) +func CheckExtensionObject(o client.Object) error { + obj, ok := o.(extensionsv1alpha1.Object) + if !ok { + return fmt.Errorf("expected extensionsv1alpha1.Object but got %T", o) } - op, ok := obj.GetAnnotations()[v1beta1constants.GardenerOperation] - if ok { - return fmt.Errorf("gardener operation %q is not yet picked up by extension controller", op) - } + status := obj.GetExtensionStatus() + return checkExtensionObject(obj.GetGeneration(), status.GetObservedGeneration(), obj.GetAnnotations(), status.GetLastError(), status.GetLastOperation()) +} - if lastErr := status.GetLastError(); lastErr != nil { - return fmt.Errorf("extension encountered error during reconciliation: %s", lastErr.GetDescription()) - } +// ExtensionOperationHasBeenUpdatedSince returns a health check function that checks if an extension Object's last +// operation has been updated since `lastUpdateTime`. +func ExtensionOperationHasBeenUpdatedSince(lastUpdateTime metav1.Time) Func { + return func(o client.Object) error { + obj, ok := o.(extensionsv1alpha1.Object) + if !ok { + return fmt.Errorf("expected extensionsv1alpha1.Object but got %T", o) + } - lastOp := status.GetLastOperation() - if lastOp == nil { - return fmt.Errorf("extension did not record a last operation yet") + lastOperation := obj.GetExtensionStatus().GetLastOperation() + if lastOperation == nil || !lastOperation.LastUpdateTime.After(lastUpdateTime.Time) { + return fmt.Errorf("extension operation was not updated yet") + } + return nil } +} - if lastOp.GetState() != gardencorev1beta1.LastOperationStateSucceeded { - return fmt.Errorf("extension state is not succeeded but %v", lastOp.GetState()) +// CheckBackupBucket checks if an backup bucket Object is healthy or not. +func CheckBackupBucket(bb client.Object) error { + obj, ok := bb.(*gardencorev1beta1.BackupBucket) + if !ok { + return fmt.Errorf("expected gardencorev1beta1.BackupBucket but got %T", bb) } - return nil + return checkExtensionObject(obj.Generation, obj.Status.ObservedGeneration, obj.Annotations, obj.Status.LastError, obj.Status.LastOperation) } -// CheckBackupBucket checks if an backup bucket Object is healthy or not. 
-// An extension object is healthy if -// * Its observed generation is up-to-date -// * No gardener.cloud/operation is set -// * No lastError is in the status -// * A last operation is state succeeded is present -func CheckBackupBucket(obj *gardencorev1beta1.BackupBucket) error { - status := obj.Status - if status.ObservedGeneration != obj.Generation { - return fmt.Errorf("observed generation outdated (%d/%d)", status.ObservedGeneration, obj.Generation) +// checkExtensionObject checks if an extension Object is healthy or not. +func checkExtensionObject(generation int64, observedGeneration int64, annotations map[string]string, lastError *gardencorev1beta1.LastError, lastOperation *gardencorev1beta1.LastOperation) error { + if lastError != nil { + return gardencorev1beta1helper.NewErrorWithCodes(fmt.Sprintf("extension encountered error during reconciliation: %s", lastError.Description), lastError.Codes...) } - op, ok := obj.GetAnnotations()[v1beta1constants.GardenerOperation] - if ok { - return fmt.Errorf("gardener operation %q is not yet picked up by controller", op) + if observedGeneration != generation { + return fmt.Errorf("observed generation outdated (%d/%d)", observedGeneration, generation) } - if lastErr := status.LastError; lastErr != nil { - return fmt.Errorf("backup bucket encountered error during reconciliation: %s", lastErr.GetDescription()) + if op, ok := annotations[v1beta1constants.GardenerOperation]; ok { + return fmt.Errorf("gardener operation %q is not yet picked up by controller", op) } - lastOp := status.LastOperation - if lastOp == nil { - return fmt.Errorf("backup bucket did not record a last operation yet") + if lastOperation == nil { + return fmt.Errorf("extension did not record a last operation yet") } - if lastOp.GetState() != gardencorev1beta1.LastOperationStateSucceeded { - return fmt.Errorf("backup bucket state is not succeeded but %v", lastOp.GetState()) + if lastOperation.State != gardencorev1beta1.LastOperationStateSucceeded { + return fmt.Errorf("extension state is not succeeded but %v", lastOperation.State) } + return nil } @@ -333,9 +358,9 @@ var Now = time.Now type conditionerFunc func(conditionType string, message string) gardencorev1beta1.Condition // CheckAPIServerAvailability checks if the API server of a cluster is reachable and measure the response time. -func CheckAPIServerAvailability(condition gardencorev1beta1.Condition, restClient rest.Interface, conditioner conditionerFunc) gardencorev1beta1.Condition { +func CheckAPIServerAvailability(ctx context.Context, condition gardencorev1beta1.Condition, restClient rest.Interface, conditioner conditionerFunc, log logrus.FieldLogger) gardencorev1beta1.Condition { now := Now() - response := restClient.Get().AbsPath("/healthz").Do() + response := restClient.Get().AbsPath("/healthz").Do(ctx) responseDurationText := fmt.Sprintf("[response_time:%dms]", Now().Sub(now).Nanoseconds()/time.Millisecond.Nanoseconds()) if response.Error() != nil { message := fmt.Sprintf("Request to API server /healthz endpoint failed. %s (%s)", responseDurationText, response.Error().Error()) @@ -354,10 +379,78 @@ func CheckAPIServerAvailability(condition gardencorev1beta1.Condition, restClien } else { body = string(bodyRaw) } - message := fmt.Sprintf("API server /healthz endpoint endpoint check returned a non ok status code %d. %s (%s)", statusCode, responseDurationText, body) + message := fmt.Sprintf("API server /healthz endpoint check returned a non ok status code %d. 
(%s)", statusCode, body) + log.Error(message) return conditioner("HealthzRequestError", message) } - message := fmt.Sprintf("API server /healthz endpoint responded with success status code. %s", responseDurationText) + message := "API server /healthz endpoint responded with success status code." return gardencorev1beta1helper.UpdatedCondition(condition, gardencorev1beta1.ConditionTrue, "HealthzRequestSucceeded", message) } + +var ( + trueManagedResourceConditionTypes = []resourcesv1alpha1.ConditionType{ + resourcesv1alpha1.ResourcesApplied, + resourcesv1alpha1.ResourcesHealthy, + } +) + +// CheckManagedResource checks whether the given ManagedResource is healthy. +// A ManagedResource is considered healthy if its controller observed its current revision, +// and if the required conditions are healthy. +func CheckManagedResource(managedResource *resourcesv1alpha1.ManagedResource) error { + if managedResource.Status.ObservedGeneration < managedResource.Generation { + return fmt.Errorf("observed generation outdated (%d/%d)", managedResource.Status.ObservedGeneration, managedResource.Generation) + } + + for _, trueConditionType := range trueManagedResourceConditionTypes { + conditionType := string(trueConditionType) + condition := getManagedResourceCondition(managedResource.Status.Conditions, trueConditionType) + if condition == nil { + return requiredConditionMissing(conditionType) + } + if err := checkConditionState(conditionType, string(corev1.ConditionTrue), string(condition.Status), condition.Reason, condition.Message); err != nil { + return err + } + } + + return nil +} + +func getManagedResourceCondition(conditions []resourcesv1alpha1.ManagedResourceCondition, conditionType resourcesv1alpha1.ConditionType) *resourcesv1alpha1.ManagedResourceCondition { + for _, condition := range conditions { + if condition.Type == conditionType { + return &condition + } + } + return nil +} + +// CheckTunnelConnection checks if the tunnel connection between the control plane and the shoot networks +// is established. +func CheckTunnelConnection(ctx context.Context, shootClient kubernetes.Interface, logger logrus.FieldLogger, tunnelName string) (bool, error) { + podList := &corev1.PodList{} + if err := shootClient.Client().List(ctx, podList, client.InNamespace(metav1.NamespaceSystem), client.MatchingLabels{"app": tunnelName}); err != nil { + return retry.SevereError(err) + } + + var tunnelPod *corev1.Pod + for _, pod := range podList.Items { + if pod.Status.Phase == corev1.PodRunning { + tunnelPod = &pod + break + } + } + + if tunnelPod == nil { + logger.Infof("Waiting until a running %s pod exists in the Shoot cluster...", tunnelName) + return retry.MinorError(fmt.Errorf("no running %s pod found yet in the shoot cluster", tunnelName)) + } + if err := shootClient.CheckForwardPodPort(tunnelPod.Namespace, tunnelPod.Name, 0, 22); err != nil { + logger.Info("Waiting until the tunnel connection has been established...") + return retry.MinorError(fmt.Errorf("could not forward to %s pod: %v", tunnelName, err)) + } + + logger.Info("Tunnel connection has been established.") + return retry.Ok() +} diff --git a/vendor/github.com/gardener/gardener/pkg/utils/labels.go b/vendor/github.com/gardener/gardener/pkg/utils/labels.go new file mode 100644 index 000000000..e1085a3d4 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/utils/labels.go @@ -0,0 +1,28 @@ +// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 
2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package utils + +import ( + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/selection" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +// MustNewRequirement creates a labels.Requirement with the given values and panics if there is an error. +func MustNewRequirement(key string, op selection.Operator, vals ...string) labels.Requirement { + req, err := labels.NewRequirement(key, op, vals) + utilruntime.Must(err) + return *req +} diff --git a/vendor/github.com/gardener/gardener/pkg/utils/miscellaneous.go b/vendor/github.com/gardener/gardener/pkg/utils/miscellaneous.go index 4b07cf182..ef482e51f 100644 --- a/vendor/github.com/gardener/gardener/pkg/utils/miscellaneous.go +++ b/vendor/github.com/gardener/gardener/pkg/utils/miscellaneous.go @@ -17,8 +17,10 @@ package utils import ( "net" "regexp" + "strings" "time" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -122,3 +124,36 @@ func TestEmail(email string) bool { func IsTrue(value *bool) bool { return value != nil && *value } + +// IDForKeyWithOptionalValue returns an identifier for the given key + optional value. +func IDForKeyWithOptionalValue(key string, value *string) string { + v := "" + if value != nil { + v = "=" + *value + } + return key + v +} + +// QuantityPtr returns a Quantity pointer to its argument. +func QuantityPtr(q resource.Quantity) *resource.Quantity { + return &q +} + +// DurationPtr returns a time.Duration pointer to its argument. +func DurationPtr(d time.Duration) *time.Duration { + return &d +} + +// Indent indents the given string with the given number of spaces. +func Indent(str string, spaces int) string { + return strings.ReplaceAll(str, "\n", "\n"+strings.Repeat(" ", spaces)) +} + +// ShallowCopyMapStringInterface creates a shallow copy of the given map. +func ShallowCopyMapStringInterface(values map[string]interface{}) map[string]interface{} { + copiedValues := make(map[string]interface{}, len(values)) + for k, v := range values { + copiedValues[k] = v + } + return copiedValues +} diff --git a/vendor/github.com/gardener/gardener/pkg/utils/object.go b/vendor/github.com/gardener/gardener/pkg/utils/object.go new file mode 100644 index 000000000..c7df345c6 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/utils/object.go @@ -0,0 +1,180 @@ +// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package utils + +import ( + "context" + + autoscalingv1 "k8s.io/api/autoscaling/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/util/retry" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" +) + +var systemMetadataFields = []string{"ownerReferences", "uid", "resourceVersion", "generation", "selfLink", "creationTimestamp", "deletionTimestamp", "deletionGracePeriodSeconds", "managedFields"} + +// GetObjectByRef returns the object with the given reference and namespace using the given client. +// The full content of the object is returned as map[string]interface{}, except for system metadata fields. +// This function can be combined with runtime.DefaultUnstructuredConverter.FromUnstructured to get the object content +// as runtime.RawExtension. +func GetObjectByRef(ctx context.Context, c client.Client, ref *autoscalingv1.CrossVersionObjectReference, namespace string) (map[string]interface{}, error) { + gvk, err := gvkFromCrossVersionObjectReference(ref) + if err != nil { + return nil, err + } + return GetObject(ctx, c, gvk, ref.Name, namespace) +} + +// GetObjectByRef returns the object with the given GVK, name, and namespace as a map using the given client. +// The full content of the object is returned as map[string]interface{}, except for system metadata fields. +// This function can be combined with runtime.DefaultUnstructuredConverter.FromUnstructured to get the object content +// as runtime.RawExtension. +func GetObject(ctx context.Context, c client.Client, gvk schema.GroupVersionKind, name, namespace string) (map[string]interface{}, error) { + // Initialize the object + key := client.ObjectKey{Namespace: namespace, Name: name} + obj := &unstructured.Unstructured{} + obj.SetGroupVersionKind(gvk) + + // Get the object + if err := c.Get(ctx, key, obj); err != nil { + if !apierrors.IsNotFound(err) { + return nil, err + } + return nil, nil + } + + // Return object content + return filterMetadata(obj.UnstructuredContent(), systemMetadataFields...), nil +} + +// CreateOrUpdateObjectByRef creates or updates the object with the given reference and namespace using the given client. +// The object is created or updated with the given content, except for system metadata fields. +// This function can be combined with runtime.DefaultUnstructuredConverter.ToUnstructured to create or update an object +// from runtime.RawExtension. +func CreateOrUpdateObjectByRef(ctx context.Context, c client.Client, ref *autoscalingv1.CrossVersionObjectReference, namespace string, content map[string]interface{}) error { + gvk, err := gvkFromCrossVersionObjectReference(ref) + if err != nil { + return err + } + return CreateOrUpdateObject(ctx, c, gvk, ref.Name, namespace, content) +} + +// CreateOrUpdateObject creates or updates the object with the given GVK, name, and namespace using the given client. +// The object is created or updated with the given content, except for system metadata fields, namespace, and name. 
+// This function can be combined with runtime.DefaultUnstructuredConverter.ToUnstructured to create or update an object +// from runtime.RawExtension. +func CreateOrUpdateObject(ctx context.Context, c client.Client, gvk schema.GroupVersionKind, name, namespace string, content map[string]interface{}) error { + // Initialize the object + obj := &unstructured.Unstructured{} + obj.SetGroupVersionKind(gvk) + obj.SetName(name) + obj.SetNamespace(namespace) + + // Create or update the object + return retry.RetryOnConflict(retry.DefaultBackoff, func() error { + _, err := controllerutil.CreateOrUpdate(ctx, c, obj, func() error { + // Set object content + if content != nil { + obj.SetUnstructuredContent(mergeObjectContents(obj.UnstructuredContent(), + filterMetadata(content, add(systemMetadataFields, "namespace", "name")...))) + } + return nil + }) + return err + }) +} + +// DeleteObjectByRef deletes the object with the given reference and namespace using the given client. +func DeleteObjectByRef(ctx context.Context, c client.Client, ref *autoscalingv1.CrossVersionObjectReference, namespace string) error { + gvk, err := gvkFromCrossVersionObjectReference(ref) + if err != nil { + return err + } + return DeleteObject(ctx, c, gvk, ref.Name, namespace) +} + +// DeleteObject deletes the object with the given GVK, name, and namespace using the given client. +func DeleteObject(ctx context.Context, c client.Client, gvk schema.GroupVersionKind, name, namespace string) error { + // Initialize the object + obj := &unstructured.Unstructured{} + obj.SetGroupVersionKind(gvk) + obj.SetName(name) + obj.SetNamespace(namespace) + + // Delete the object + return client.IgnoreNotFound(c.Delete(ctx, obj)) +} + +func gvkFromCrossVersionObjectReference(ref *autoscalingv1.CrossVersionObjectReference) (schema.GroupVersionKind, error) { + gv, err := schema.ParseGroupVersion(ref.APIVersion) + if err != nil { + return schema.GroupVersionKind{}, err + } + return schema.GroupVersionKind{ + Group: gv.Group, + Version: gv.Version, + Kind: ref.Kind, + }, nil +} + +func mergeObjectContents(dest, src map[string]interface{}) map[string]interface{} { + // Merge metadata + srcMetadata, srcMetadataOK := src["metadata"].(map[string]interface{}) + if srcMetadataOK { + destMetadata, destMetadataOK := dest["metadata"].(map[string]interface{}) + if destMetadataOK { + dest["metadata"] = MergeMaps(destMetadata, srcMetadata) + } else { + dest["metadata"] = srcMetadata + } + } + + // Take spec and data from the source + for _, key := range []string{"spec", "data", "stringData"} { + srcSpec, srcSpecOK := src[key] + if srcSpecOK { + dest[key] = srcSpec + } else { + delete(dest, key) + } + } + + return dest +} + +func filterMetadata(content map[string]interface{}, fields ...string) map[string]interface{} { + // Copy content to result + result := make(map[string]interface{}) + for key, value := range content { + result[key] = value + } + + // Delete specified fields from result + if metadata, ok := result["metadata"].(map[string]interface{}); ok { + for _, field := range fields { + delete(metadata, field) + } + } + return result +} + +func add(s []string, elems ...string) []string { + result := make([]string, len(s)) + copy(result, s) + return append(result, elems...) 
+} diff --git a/vendor/github.com/gardener/gardener/pkg/utils/random.go b/vendor/github.com/gardener/gardener/pkg/utils/random.go index cf44a155c..f676f52e2 100644 --- a/vendor/github.com/gardener/gardener/pkg/utils/random.go +++ b/vendor/github.com/gardener/gardener/pkg/utils/random.go @@ -15,8 +15,12 @@ package utils import ( - "crypto/rand" + cryptorand "crypto/rand" "math/big" + mathrand "math/rand" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // GenerateRandomString uses crypto/rand to generate a random string of the specified length . @@ -33,7 +37,7 @@ func GenerateRandomStringFromCharset(n int, allowedCharacters string) (string, e output := make([]byte, n) max := new(big.Int).SetInt64(int64(len(allowedCharacters))) for i := range output { - randomCharacter, err := rand.Int(rand.Reader, max) + randomCharacter, err := cryptorand.Int(cryptorand.Reader, max) if err != nil { return "", err } @@ -41,3 +45,21 @@ func GenerateRandomStringFromCharset(n int, allowedCharacters string) (string, e } return string(output), nil } + +// RandomDuration takes a time.Duration and computes a non-negative pseudo-random duration in [0,max). +// It returns 0ns if max is <= 0ns. +func RandomDuration(max time.Duration) time.Duration { + if max.Nanoseconds() <= 0 { + return time.Duration(0) + } + return time.Duration(mathrand.Int63n(max.Nanoseconds())) +} + +// RandomDurationWithMetaDuration takes a *metav1.Duration and computes a non-negative pseudo-random duration in [0,max). +// It returns 0ns if max is nil or <= 0ns. +func RandomDurationWithMetaDuration(max *metav1.Duration) time.Duration { + if max == nil { + return time.Duration(0) + } + return RandomDuration(max.Duration) +} diff --git a/vendor/github.com/gardener/gardener/pkg/utils/retry/retry.go b/vendor/github.com/gardener/gardener/pkg/utils/retry/retry.go index ceb50e895..75502758b 100644 --- a/vendor/github.com/gardener/gardener/pkg/utils/retry/retry.go +++ b/vendor/github.com/gardener/gardener/pkg/utils/retry/retry.go @@ -78,56 +78,66 @@ func DefaultIntervalFactory() IntervalFactory { } // SevereError indicates an operation was not successful due to the given error and cannot be retried. -func SevereError(severeErr error) (done bool, err error) { +func SevereError(severeErr error) (bool, error) { return true, severeErr } // MinorError indicates an operation was not successful due to the given error but can be retried. -func MinorError(minorErr error) (done bool, err error) { +func MinorError(minorErr error) (bool, error) { return false, minorErr } // Ok indicates that an operation was successful and does not need to be retried. -func Ok() (done bool, err error) { +func Ok() (bool, error) { return true, nil } // NotOk indicates that an operation was not successful but can be retried. // It does not indicate an error. For better error reporting, consider MinorError. -func NotOk() (done bool, err error) { +func NotOk() (bool, error) { return false, nil } -type retryError struct { +// MinorOrSevereError returns a "severe" error in case the retry count exceeds the threshold. Otherwise, it returns +// a "minor" error. +func MinorOrSevereError(retryCountUntilSevere, threshold int, err error) (bool, error) { + if retryCountUntilSevere > threshold { + return SevereError(err) + } + return MinorError(err) +} + +// Error is an error that occurred during a retry operation. +type Error struct { ctxError error err error } // Cause implements Causer. 
-func (r *retryError) Cause() error { - if r.err != nil { - return r.err +func (e *Error) Cause() error { + if e.err != nil { + return e.err } - return r.ctxError + return e.ctxError } // Unwrap implements the Unwrap function // https://golang.org/pkg/errors/#Unwrap -func (r *retryError) Unwrap() error { - return r.err +func (e *Error) Unwrap() error { + return e.err } // Error implements error. -func (r *retryError) Error() string { - if r.err != nil { - return fmt.Sprintf("retry failed with %v, last error: %v", r.ctxError, r.err) +func (e *Error) Error() string { + if e.err != nil { + return fmt.Sprintf("retry failed with %v, last error: %v", e.ctxError, e.err) } - return fmt.Sprintf("retry failed with %v", r.ctxError) + return fmt.Sprintf("retry failed with %v", e.ctxError) } -// NewRetryError returns a new error with the given context error and error. The non-context error is optional. -func NewRetryError(ctxError, err error) error { - return &retryError{ctxError, err} +// NewError returns a new error with the given context error and error. The non-context error is optional. +func NewError(ctxError, err error) error { + return &Error{ctxError, err} } // UntilFor keeps retrying the given Func until it either errors severely or the context expires. @@ -157,12 +167,12 @@ func UntilFor(ctx context.Context, waitFunc WaitFunc, agg ErrorAggregator, f Fun case <-waitDone: select { case <-ctxDone: - return NewRetryError(ctx.Err(), agg.Error()) + return NewError(ctx.Err(), agg.Error()) default: return nil } case <-ctxDone: - return NewRetryError(ctx.Err(), agg.Error()) + return NewError(ctx.Err(), agg.Error()) } }(); err != nil { return err diff --git a/vendor/github.com/gardener/gardener/pkg/utils/timewindow.go b/vendor/github.com/gardener/gardener/pkg/utils/timewindow.go index 1b2af06fc..99cadc40d 100644 --- a/vendor/github.com/gardener/gardener/pkg/utils/timewindow.go +++ b/vendor/github.com/gardener/gardener/pkg/utils/timewindow.go @@ -191,14 +191,18 @@ func (m *MaintenanceTimeWindow) Contains(tTime time.Time) bool { return t.Compare(m.begin) >= 0 && t.Compare(m.end) <= 0 } -var ( - // RandomFunc is a function that computes a random number. - RandomFunc = rand.Int63nRange -) +// RandomFunc is a function that computes a random number. +var RandomFunc = rand.Int63nRange // RandomDurationUntilNext computes the duration until a random time within the time window for the next maintenance // execution. -func (m *MaintenanceTimeWindow) RandomDurationUntilNext(from time.Time) time.Duration { +// The <from> parameter is the time from which the calculation for the next maintenance time window shall be performed +// (typically: time.Now()). +// The <shiftBeginToFromIfContained> parameter controls whether the beginning of the maintenance time window should be +// changed to <from> if <from> is already inside the maintenance time window (otherwise, it has no effect). As a +// consequence, this will return a random duration from <from> until the end of the maintenance time window which is +// shorter than 24h.
+func (m *MaintenanceTimeWindow) RandomDurationUntilNext(from time.Time, shiftBeginToFromIfContained bool) time.Duration { from = from.UTC() var ( @@ -206,6 +210,15 @@ func (m *MaintenanceTimeWindow) RandomDurationUntilNext(from time.Time) time.Dur end = m.adjustedEnd(from) ) + if shiftBeginToFromIfContained && m.Contains(from) { + dayOffset := 0 + if from.Hour()-begin.Hour() < 0 { + dayOffset = 1 + } + begin = from.AddDate(0, 0, dayOffset) + from = from.AddDate(0, 0, dayOffset) + } + if begin.Sub(from) < 0 && (m.Contains(from) || from.After(end)) { begin = begin.AddDate(0, 0, 1) end = end.AddDate(0, 0, 1) diff --git a/vendor/github.com/gardener/gardener/pkg/utils/version/version.go b/vendor/github.com/gardener/gardener/pkg/utils/version/version.go index ebbd7ca04..77a605860 100644 --- a/vendor/github.com/gardener/gardener/pkg/utils/version/version.go +++ b/vendor/github.com/gardener/gardener/pkg/utils/version/version.go @@ -50,7 +50,7 @@ func CheckVersionMeetsConstraint(version, constraint string) (bool, error) { return false, err } - v, err := semver.NewVersion(version) + v, err := semver.NewVersion(normalizeVersion(version)) if err != nil { return false, err } diff --git a/vendor/github.com/gardener/hvpa-controller/LICENSE.md b/vendor/github.com/gardener/hvpa-controller/LICENSE.md new file mode 100644 index 000000000..fc6885489 --- /dev/null +++ b/vendor/github.com/gardener/hvpa-controller/LICENSE.md @@ -0,0 +1,296 @@ +``` + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +``` + +## APIs + +This project may include APIs to SAP or third party products or services. The use of these APIs, +products and services may be subject to additional agreements. In no event shall the application +of the Apache Software License, v.2 to this project grant any rights in or to these APIs, products +or services that would alter, expand, be inconsistent with, or supersede any terms of these additional +agreements. API means application programming interfaces, as well as their respective specifications +and implementing code that allows other software products to communicate with or call on SAP or +third party products or services (for example, SAP Enterprise Services, BAPIs, Idocs, RFCs and +ABAP calls or other user exits) and may be made available through SAP or third party products, +SDKs, documentation or other media. + +## Subcomponents + +This project includes the following subcomponents that are subject to separate license terms. 
+Your use of these subcomponents is subject to the separate license terms applicable to +each subcomponent. + +API. +https://github.com/kubernetes/api/ +Copyright 2017 The Kubernetes Authors. +Apache 2 license (https://github.com/kubernetes/api/blob/master/LICENSE ). + +APIMachinery. +https://github.com/kubernetes/apimachinery +Copyright 2017 The Kubernetes Authors. +Apache 2 license (https://github.com/kubernetes/apimachinery/blob/master/LICENSE ). + +Kubernetes client-go +https://github.com/kubernetes/client-go/ +Copyright 2018 The Kubernetes Authors +Apache 2 license (https://github.com/kubernetes/client-go/blob/master/LICENSE) + +Autoscaler. +https://github.com/kubernetes/autoscaler +Copyright 2017 The Kubernetes Authors. +Apache 2 license (https://github.com/kubernetes/autoscaler/blob/master/LICENSE ). + +Code-Generator. +https://git.k8s.io/code-generator. +Copyright 2017 The Kubernetes Authors. +Apache 2 license (https://git.k8s.io/code-generator/LICENSE) + +controller-runtime +https://github.com/kubernetes-sigs/controller-runtime +Copyright 2018 The Kubernetes Authors. +Apache 2 license (https://github.com/kubernetes-sigs/controller-runtime/blob/master/LICENSE). + +controller-tools +https://github.com/kubernetes-sigs/controller-tools +Copyright 2018 The Kubernetes Authors. +Apache 2 license (https://github.com/kubernetes-sigs/controller-runtime/blob/master/LICENSE). + +Klog +https://github.com/kubernetes/klog +Copyright 2017 The Kubernetes Authors +Apache 2 license (https://github.com/kubernetes/klog/blob/master/LICENSE) + +Ginkgo. +https://github.com/onsi/ginkgo. +Copyright (c) 2013-2014 Onsi Fakhouri +MIT license (https://github.com/onsi/ginkgo/blob/master/LICENSE) + +Gomega. +https://github.com/onsi/gomega. +Copyright (c) 2013-2014 Onsi Fakhouri +MIT license (https://github.com/onsi/gomega/blob/master/LICENSE) + +------ +## MIT License + + The MIT License (MIT) + + Copyright (c) + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + THE SOFTWARE. diff --git a/vendor/github.com/gardener/hvpa-controller/api/v1alpha1/groupversion_info.go b/vendor/github.com/gardener/hvpa-controller/api/v1alpha1/groupversion_info.go new file mode 100644 index 000000000..bfa927dc0 --- /dev/null +++ b/vendor/github.com/gardener/hvpa-controller/api/v1alpha1/groupversion_info.go @@ -0,0 +1,68 @@ +/* +Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1alpha1 contains API Schema definitions for the autoscaling v1alpha1 API group +// +kubebuilder:object:generate=true +// +groupName=autoscaling.k8s.io +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + vpa_api "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta2" +) + +var ( + // GroupName is the group name use in this package + GroupName = "autoscaling.k8s.io" + + // SchemeGroupVersionVpa is group version used to register these objects + SchemeGroupVersionVpa = schema.GroupVersion{Group: GroupName, Version: "v1beta2"} + + // SchemeGroupVersionHvpa is group version used to register these objects + SchemeGroupVersionHvpa = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + + localSchemeBuilder = &SchemeBuilder + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register(addKnownTypes) +} + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersionHvpa, + &Hvpa{}, + &HvpaList{}, + ) + scheme.AddKnownTypes(SchemeGroupVersionVpa, + &vpa_api.VerticalPodAutoscaler{}, + &vpa_api.VerticalPodAutoscalerList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersionVpa) + metav1.AddToGroupVersion(scheme, SchemeGroupVersionHvpa) + return nil +} diff --git a/vendor/github.com/gardener/hvpa-controller/api/v1alpha1/hvpa_types.go b/vendor/github.com/gardener/hvpa-controller/api/v1alpha1/hvpa_types.go new file mode 100644 index 000000000..befc7e4a4 --- /dev/null +++ b/vendor/github.com/gardener/hvpa-controller/api/v1alpha1/hvpa_types.go @@ -0,0 +1,352 @@ +/* +Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + autoscaling "k8s.io/api/autoscaling/v2beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + vpa_api "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta2" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. 
Any new fields you add must have json tags for the fields to be serialized. + +// VpaTemplateSpec defines the spec for VPA +type VpaTemplateSpec struct { + // Controls how the autoscaler computes recommended resources. + // The resource policy may be used to set constraints on the recommendations + // for individual containers. If not specified, the autoscaler computes recommended + // resources for all containers in the pod, without additional constraints. + // +optional + ResourcePolicy *vpa_api.PodResourcePolicy `json:"resourcePolicy,omitempty" protobuf:"bytes,2,opt,name=resourcePolicy"` +} + +// UpdatePolicy describes the rules on how changes are applied. +type UpdatePolicy struct { + // Controls when autoscaler applies changes to the resources. + // The default is 'Auto'. + // +optional + UpdateMode *string `json:"updateMode,omitempty" protobuf:"bytes,1,opt,name=updateMode"` +} + +const ( + // UpdateModeOff means that autoscaler never changes resources. + UpdateModeOff string = "Off" + // UpdateModeAuto means that autoscaler can update resources during the lifetime of the resource. + UpdateModeAuto string = "Auto" + // UpdateModeMaintenanceWindow means that HPA/VPA will only act during maintenance window. + UpdateModeMaintenanceWindow string = "MaintenanceWindow" + + // UpdateModeDefault is the default update mode + UpdateModeDefault = UpdateModeAuto +) + +// MaintenanceTimeWindow contains information about the time window for maintenance operations. +type MaintenanceTimeWindow struct { + // Begin is the beginning of the time window in the format HHMMSS+ZONE, e.g. "220000+0100". + Begin string `json:"begin"` + // End is the end of the time window in the format HHMMSS+ZONE, e.g. "220000+0100". + End string `json:"end"` +} + +// HpaTemplateSpec defines the spec for HPA +type HpaTemplateSpec struct { + // minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. + // It defaults to 1 pod. + // +optional + MinReplicas *int32 `json:"minReplicas,omitempty" protobuf:"varint,1,opt,name=minReplicas"` + + // maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. + // It cannot be less that minReplicas. + MaxReplicas int32 `json:"maxReplicas" protobuf:"varint,2,opt,name=maxReplicas"` + + // metrics contains the specifications for which to use to calculate the + // desired replica count (the maximum replica count across all metrics will + // be used). The desired replica count is calculated multiplying the + // ratio between the target value and the current value by the current + // number of pods. Ergo, metrics used must decrease as the pod count is + // increased, and vice-versa. See the individual metric source types for + // more information about how each type of metric must respond. + // If not set, the default metric will be set to 80% average CPU utilization. 
+ // +optional + Metrics []autoscaling.MetricSpec `json:"metrics,omitempty" protobuf:"bytes,3,rep,name=metrics"` +} + +// WeightBasedScalingInterval defines the interval of replica counts in which VpaWeight is applied to VPA scaling +type WeightBasedScalingInterval struct { + // VpaWeight defines the weight (in percentage) to be given to VPA's recommendationd for the interval of number of replicas provided + // +kubebuilder:validation:Minimum=0 + // +kubebuilder:validation:Maximum=100 + VpaWeight VpaWeight `json:"vpaWeight,omitempty"` + // StartReplicaCount is the number of replicas from which VpaWeight is applied to VPA scaling + // If this field is not provided, it will default to minReplicas of HPA + // +optional + StartReplicaCount int32 `json:"startReplicaCount,omitempty"` + // LastReplicaCount is the number of replicas till which VpaWeight is applied to VPA scaling + // If this field is not provided, it will default to maxReplicas of HPA + // +optional + LastReplicaCount int32 `json:"lastReplicaCount,omitempty"` +} + +// VpaSpec defines spec for VPA +type VpaSpec struct { + // Selector is a label query that should match VPA. + // Must match in order to be controlled. + // If empty, defaulted to labels on VPA template. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + // +optional + Selector *metav1.LabelSelector `json:"selector,omitempty"` + + // Deploy defines whether the VPA is deployed or not + Deploy bool `json:"deploy,omitempty"` + + // ScaleUp defines the parameters for scale up + ScaleUp ScaleType `json:"scaleUp,omitempty"` + + // ScaleDown defines the parameters for scale down + ScaleDown ScaleType `json:"scaleDown,omitempty"` + + // Template is the object that describes the VPA that will be created. + // +optional + Template VpaTemplate `json:"template,omitempty"` + + // LimitsRequestsGapScaleParams is the scaling thresholds for limits + LimitsRequestsGapScaleParams ScaleParams `json:"limitsRequestsGapScaleParams,omitempty"` +} + +// ScaleType defines parameters for scaling +type ScaleType struct { + // Describes the rules on when changes are applied. + // If not specified, all fields in the `UpdatePolicy` are set to their + // default values. + // +optional + UpdatePolicy UpdatePolicy `json:"updatePolicy,omitempty"` + + // MinChange is the minimum change in the resource on which HVPA acts + // HVPA uses minimum of the Value and Percentage value + MinChange ScaleParams `json:"minChange,omitempty"` + + // StabilizationDuration defines the minimum delay in minutes between 2 consecutive scale operations + // Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h" + StabilizationDuration *string `json:"stabilizationDuration,omitempty"` +} + +// ScaleParams defines the scaling parameters for the limits +type ScaleParams struct { + // Scale parameters for CPU + CPU ChangeParams `json:"cpu,omitempty"` + // Scale parameters for memory + Memory ChangeParams `json:"memory,omitempty"` + // Scale patameters for replicas + Replicas ChangeParams `json:"replicas,omitempty"` +} + +// VpaTemplate defines the template for VPA +type VpaTemplate struct { + // Metadata of the pods created from this template. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // Spec defines the behavior of a VPA. + // +optional + Spec VpaTemplateSpec `json:"spec,omitempty"` +} + +// HpaTemplate defines the template for HPA +type HpaTemplate struct { + // Metadata of the pods created from this template. 
+ // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // Spec defines the behavior of a HPA. + // +optional + Spec HpaTemplateSpec `json:"spec,omitempty"` +} + +// HpaSpec defines spec for HPA +type HpaSpec struct { + // Selector is a label query that should match HPA. + // Must match in order to be controlled. + // If empty, defaulted to labels on HPA template. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + // +optional + Selector *metav1.LabelSelector `json:"selector,omitempty"` + + // Deploy defines whether the HPA is deployed or not + Deploy bool `json:"deploy,omitempty"` + + // ScaleUp defines the parameters for scale up + ScaleUp ScaleType `json:"scaleUp,omitempty"` + + // ScaleDown defines the parameters for scale down + ScaleDown ScaleType `json:"scaleDown,omitempty"` + + // Template is the object that describes the HPA that will be created. + // +optional + Template HpaTemplate `json:"template,omitempty"` +} + +// HvpaSpec defines the desired state of Hvpa +type HvpaSpec struct { + // Replicas is the number of replicas of target resource + Replicas *int32 `json:"replicas,omitempty"` + + // Hpa defines the spec of HPA + Hpa HpaSpec `json:"hpa,omitempty"` + + // Vpa defines the spec of VPA + Vpa VpaSpec `json:"vpa,omitempty"` + + // WeightBasedScalingIntervals defines the intervals of replica counts, and the weights for scaling a deployment vertically + // If there are overlapping intervals, then the vpaWeight will be taken from the first matching interval + WeightBasedScalingIntervals []WeightBasedScalingInterval `json:"weightBasedScalingIntervals,omitempty"` + + // TargetRef points to the controller managing the set of pods for the autoscaler to control + TargetRef *autoscaling.CrossVersionObjectReference `json:"targetRef"` + + // MaintenanceTimeWindow contains information about the time window for maintenance operations. + // +optional + MaintenanceTimeWindow *MaintenanceTimeWindow `json:"maintenanceTimeWindow,omitempty"` +} + +// ChangeParams defines the parameters for scaling +type ChangeParams struct { + // Value is the absolute value of the scaling + // +optional + Value *string `json:"value,omitempty"` + // Percentage is the percentage of currently allocated value to be used for scaling + // +optional + Percentage *int32 `json:"percentage,omitempty"` +} + +// VpaWeight - weight to provide to VPA scaling +type VpaWeight int32 + +const ( + // VpaOnly - only vertical scaling + VpaOnly VpaWeight = 100 + // HpaOnly - only horizontal scaling + HpaOnly VpaWeight = 0 +) + +// LastError has detailed information of the error +type LastError struct { + // Description of the error + Description string `json:"description,omitempty"` + + // Time at which the error occurred + LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty"` + + // LastOperation is the type of operation for which error occurred + LastOperation string `json:"lastOperation,omitempty"` +} + +// HvpaStatus defines the observed state of Hvpa +type HvpaStatus struct { + // Replicas is the number of replicas of the target resource. + Replicas *int32 `json:"replicas,omitempty"` + // TargetSelector is the string form of the label selector of HPA. This is required for HPA to work with scale subresource. 
+ TargetSelector *string `json:"targetSelector,omitempty"` + // Current HPA UpdatePolicy set in the spec + HpaScaleUpUpdatePolicy *UpdatePolicy `json:"hpaScaleUpUpdatePolicy,omitempty"` + // Current HPA UpdatePolicy set in the spec + HpaScaleDownUpdatePolicy *UpdatePolicy `json:"hpaScaleDownUpdatePolicy,omitempty"` + // Current VPA UpdatePolicy set in the spec + VpaScaleUpUpdatePolicy *UpdatePolicy `json:"vpaScaleUpUpdatePolicy,omitempty"` + // Current VPA UpdatePolicy set in the spec + VpaScaleDownUpdatePolicy *UpdatePolicy `json:"vpaScaleDownUpdatePolicy,omitempty"` + + HpaWeight VpaWeight `json:"hpaWeight,omitempty"` + VpaWeight VpaWeight `json:"vpaWeight,omitempty"` + + // Override scale up stabilization window + OverrideScaleUpStabilization bool `json:"overrideScaleUpStabilization,omitempty"` + + LastBlockedScaling []*BlockedScaling `json:"lastBlockedScaling,omitempty"` + LastScaling ScalingStatus `json:"lastScaling,omitempty"` + + // LastError has details of any errors that occurred + LastError *LastError `json:"lastError,omitempty"` +} + +// BlockingReason defines the reason for blocking. +type BlockingReason string + +const ( + // BlockingReasonStabilizationWindow - HVPA is in stabilization window + BlockingReasonStabilizationWindow BlockingReason = "StabilizationWindow" + // BlockingReasonMaintenanceWindow - Resource is in maintenance window + BlockingReasonMaintenanceWindow BlockingReason = "MaintenanceWindow" + // BlockingReasonUpdatePolicy - Update policy doesn't support scaling + BlockingReasonUpdatePolicy BlockingReason = "UpdatePolicy" + // BlockingReasonWeight - VpaWeight doesn't support scaling + BlockingReasonWeight BlockingReason = "Weight" + // BlockingReasonMinChange - Min change doesn't support scaling + BlockingReasonMinChange BlockingReason = "MinChange" +) + +// BlockingReasons lists all the blocking reasons +var BlockingReasons = [...]BlockingReason{ + BlockingReasonMaintenanceWindow, + BlockingReasonMinChange, + BlockingReasonStabilizationWindow, + BlockingReasonUpdatePolicy, + BlockingReasonWeight, +} + +// BlockedScaling defines the details for blocked scaling +type BlockedScaling struct { + Reason BlockingReason `json:"reason,omitempty"` + ScalingStatus `json:"scalingStatus,omitempty"` +} + +// ScalingStatus defines the status of scaling +type ScalingStatus struct { + LastScaleTime *metav1.Time `json:"lastScaleTime,omitempty"` + HpaStatus HpaStatus `json:"hpaStatus,omitempty" protobuf:"bytes,1,opt,name=hpaStatus"` + VpaStatus vpa_api.VerticalPodAutoscalerStatus `json:"vpaStatus,omitempty" protobuf:"bytes,2,opt,name=vpaStatus"` +} + +// HpaStatus defines the status of HPA +type HpaStatus struct { + CurrentReplicas int32 `json:"currentReplicas,omitempty"` + DesiredReplicas int32 `json:"desiredReplicas,omitempty"` +} + +// Hvpa is the Schema for the hvpas API +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas,selectorpath=.status.targetSelector +type Hvpa struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec HvpaSpec `json:"spec,omitempty"` + Status HvpaStatus `json:"status,omitempty"` +} + +// HvpaList contains a list of Hvpa +// +kubebuilder:object:root=true +type HvpaList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Hvpa `json:"items"` +} + +/*func init() { + SchemeBuilder.Register(&Hvpa{}, &HvpaList{}) +}*/ diff --git 
a/vendor/github.com/gardener/hvpa-controller/api/v1alpha1/hvpa_webhook.go b/vendor/github.com/gardener/hvpa-controller/api/v1alpha1/hvpa_webhook.go new file mode 100644 index 000000000..bd1a5ae79 --- /dev/null +++ b/vendor/github.com/gardener/hvpa-controller/api/v1alpha1/hvpa_webhook.go @@ -0,0 +1,75 @@ +/* +Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/webhook" +) + +// log is for logging in this package. +var hvpalog = logf.Log.WithName("hvpa-resource") + +// SetupWebhookWithManager sets up manager with a new webhook and r as the reconcile.Reconciler +func (r *Hvpa) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). + Complete() +} + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! + +// +kubebuilder:webhook:path=/mutate-autoscaling-k8s-io-v1alpha1-hvpa,mutating=true,failurePolicy=fail,groups=autoscaling.k8s.io,resources=hvpas,verbs=create;update,versions=v1alpha1,name=mhvpa.kb.io + +var _ webhook.Defaulter = &Hvpa{} + +// Default implements webhook.Defaulter so a webhook will be registered for the type +func (r *Hvpa) Default() { + hvpalog.Info("default", "name", r.Name) + + // TODO(user): fill in your defaulting logic. +} + +// +kubebuilder:webhook:path=/validate-autoscaling-k8s-io-v1alpha1-hvpa,mutating=false,failurePolicy=fail,groups=autoscaling.k8s.io,resources=hvpas,verbs=create;update,versions=v1alpha1,name=vhvpa.kb.io + +var _ webhook.Validator = &Hvpa{} + +// ValidateCreate implements webhook.Validator so a webhook will be registered for the type +func (r *Hvpa) ValidateCreate() error { + hvpalog.Info("validate create", "name", r.Name) + + // TODO(user): fill in your validation logic upon object creation. + return nil +} + +// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type +func (r *Hvpa) ValidateUpdate(old runtime.Object) error { + hvpalog.Info("validate update", "name", r.Name) + + // TODO(user): fill in your validation logic upon object update. + return nil +} + +// ValidateDelete implements webhook.Validator so a webhook will be registered for the type +func (r *Hvpa) ValidateDelete() error { + hvpalog.Info("validate delete", "name", r.Name) + + // TODO(user): fill in your validation logic upon object update. + return nil +} diff --git a/vendor/github.com/gardener/hvpa-controller/api/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/gardener/hvpa-controller/api/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000..ed364a5b5 --- /dev/null +++ b/vendor/github.com/gardener/hvpa-controller/api/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,498 @@ +// +build !ignore_autogenerated + +/* +Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// autogenerated by controller-gen object, do not modify manually + +package v1alpha1 + +import ( + "k8s.io/api/autoscaling/v2beta1" + "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta2" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BlockedScaling) DeepCopyInto(out *BlockedScaling) { + *out = *in + in.ScalingStatus.DeepCopyInto(&out.ScalingStatus) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BlockedScaling. +func (in *BlockedScaling) DeepCopy() *BlockedScaling { + if in == nil { + return nil + } + out := new(BlockedScaling) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ChangeParams) DeepCopyInto(out *ChangeParams) { + *out = *in + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } + if in.Percentage != nil { + in, out := &in.Percentage, &out.Percentage + *out = new(int32) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChangeParams. +func (in *ChangeParams) DeepCopy() *ChangeParams { + if in == nil { + return nil + } + out := new(ChangeParams) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HpaSpec) DeepCopyInto(out *HpaSpec) { + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + in.ScaleUp.DeepCopyInto(&out.ScaleUp) + in.ScaleDown.DeepCopyInto(&out.ScaleDown) + in.Template.DeepCopyInto(&out.Template) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HpaSpec. +func (in *HpaSpec) DeepCopy() *HpaSpec { + if in == nil { + return nil + } + out := new(HpaSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HpaStatus) DeepCopyInto(out *HpaStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HpaStatus. +func (in *HpaStatus) DeepCopy() *HpaStatus { + if in == nil { + return nil + } + out := new(HpaStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HpaTemplate) DeepCopyInto(out *HpaTemplate) { + *out = *in + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HpaTemplate. 
+func (in *HpaTemplate) DeepCopy() *HpaTemplate { + if in == nil { + return nil + } + out := new(HpaTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HpaTemplateSpec) DeepCopyInto(out *HpaTemplateSpec) { + *out = *in + if in.MinReplicas != nil { + in, out := &in.MinReplicas, &out.MinReplicas + *out = new(int32) + **out = **in + } + if in.Metrics != nil { + in, out := &in.Metrics, &out.Metrics + *out = make([]v2beta1.MetricSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HpaTemplateSpec. +func (in *HpaTemplateSpec) DeepCopy() *HpaTemplateSpec { + if in == nil { + return nil + } + out := new(HpaTemplateSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Hvpa) DeepCopyInto(out *Hvpa) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Hvpa. +func (in *Hvpa) DeepCopy() *Hvpa { + if in == nil { + return nil + } + out := new(Hvpa) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Hvpa) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HvpaList) DeepCopyInto(out *HvpaList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Hvpa, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HvpaList. +func (in *HvpaList) DeepCopy() *HvpaList { + if in == nil { + return nil + } + out := new(HvpaList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HvpaList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HvpaSpec) DeepCopyInto(out *HvpaSpec) { + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + in.Hpa.DeepCopyInto(&out.Hpa) + in.Vpa.DeepCopyInto(&out.Vpa) + if in.WeightBasedScalingIntervals != nil { + in, out := &in.WeightBasedScalingIntervals, &out.WeightBasedScalingIntervals + *out = make([]WeightBasedScalingInterval, len(*in)) + copy(*out, *in) + } + if in.TargetRef != nil { + in, out := &in.TargetRef, &out.TargetRef + *out = new(v2beta1.CrossVersionObjectReference) + **out = **in + } + if in.MaintenanceTimeWindow != nil { + in, out := &in.MaintenanceTimeWindow, &out.MaintenanceTimeWindow + *out = new(MaintenanceTimeWindow) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HvpaSpec. 
+func (in *HvpaSpec) DeepCopy() *HvpaSpec { + if in == nil { + return nil + } + out := new(HvpaSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HvpaStatus) DeepCopyInto(out *HvpaStatus) { + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + if in.TargetSelector != nil { + in, out := &in.TargetSelector, &out.TargetSelector + *out = new(string) + **out = **in + } + if in.HpaScaleUpUpdatePolicy != nil { + in, out := &in.HpaScaleUpUpdatePolicy, &out.HpaScaleUpUpdatePolicy + *out = new(UpdatePolicy) + (*in).DeepCopyInto(*out) + } + if in.HpaScaleDownUpdatePolicy != nil { + in, out := &in.HpaScaleDownUpdatePolicy, &out.HpaScaleDownUpdatePolicy + *out = new(UpdatePolicy) + (*in).DeepCopyInto(*out) + } + if in.VpaScaleUpUpdatePolicy != nil { + in, out := &in.VpaScaleUpUpdatePolicy, &out.VpaScaleUpUpdatePolicy + *out = new(UpdatePolicy) + (*in).DeepCopyInto(*out) + } + if in.VpaScaleDownUpdatePolicy != nil { + in, out := &in.VpaScaleDownUpdatePolicy, &out.VpaScaleDownUpdatePolicy + *out = new(UpdatePolicy) + (*in).DeepCopyInto(*out) + } + if in.LastBlockedScaling != nil { + in, out := &in.LastBlockedScaling, &out.LastBlockedScaling + *out = make([]*BlockedScaling, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(BlockedScaling) + (*in).DeepCopyInto(*out) + } + } + } + in.LastScaling.DeepCopyInto(&out.LastScaling) + if in.LastError != nil { + in, out := &in.LastError, &out.LastError + *out = new(LastError) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HvpaStatus. +func (in *HvpaStatus) DeepCopy() *HvpaStatus { + if in == nil { + return nil + } + out := new(HvpaStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LastError) DeepCopyInto(out *LastError) { + *out = *in + in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LastError. +func (in *LastError) DeepCopy() *LastError { + if in == nil { + return nil + } + out := new(LastError) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MaintenanceTimeWindow) DeepCopyInto(out *MaintenanceTimeWindow) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceTimeWindow. +func (in *MaintenanceTimeWindow) DeepCopy() *MaintenanceTimeWindow { + if in == nil { + return nil + } + out := new(MaintenanceTimeWindow) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScaleParams) DeepCopyInto(out *ScaleParams) { + *out = *in + in.CPU.DeepCopyInto(&out.CPU) + in.Memory.DeepCopyInto(&out.Memory) + in.Replicas.DeepCopyInto(&out.Replicas) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleParams. 
+func (in *ScaleParams) DeepCopy() *ScaleParams { + if in == nil { + return nil + } + out := new(ScaleParams) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScaleType) DeepCopyInto(out *ScaleType) { + *out = *in + in.UpdatePolicy.DeepCopyInto(&out.UpdatePolicy) + in.MinChange.DeepCopyInto(&out.MinChange) + if in.StabilizationDuration != nil { + in, out := &in.StabilizationDuration, &out.StabilizationDuration + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleType. +func (in *ScaleType) DeepCopy() *ScaleType { + if in == nil { + return nil + } + out := new(ScaleType) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScalingStatus) DeepCopyInto(out *ScalingStatus) { + *out = *in + if in.LastScaleTime != nil { + in, out := &in.LastScaleTime, &out.LastScaleTime + *out = new(v1.Time) + (*in).DeepCopyInto(*out) + } + out.HpaStatus = in.HpaStatus + in.VpaStatus.DeepCopyInto(&out.VpaStatus) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalingStatus. +func (in *ScalingStatus) DeepCopy() *ScalingStatus { + if in == nil { + return nil + } + out := new(ScalingStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UpdatePolicy) DeepCopyInto(out *UpdatePolicy) { + *out = *in + if in.UpdateMode != nil { + in, out := &in.UpdateMode, &out.UpdateMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpdatePolicy. +func (in *UpdatePolicy) DeepCopy() *UpdatePolicy { + if in == nil { + return nil + } + out := new(UpdatePolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VpaSpec) DeepCopyInto(out *VpaSpec) { + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + in.ScaleUp.DeepCopyInto(&out.ScaleUp) + in.ScaleDown.DeepCopyInto(&out.ScaleDown) + in.Template.DeepCopyInto(&out.Template) + in.LimitsRequestsGapScaleParams.DeepCopyInto(&out.LimitsRequestsGapScaleParams) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VpaSpec. +func (in *VpaSpec) DeepCopy() *VpaSpec { + if in == nil { + return nil + } + out := new(VpaSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VpaTemplate) DeepCopyInto(out *VpaTemplate) { + *out = *in + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VpaTemplate. +func (in *VpaTemplate) DeepCopy() *VpaTemplate { + if in == nil { + return nil + } + out := new(VpaTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VpaTemplateSpec) DeepCopyInto(out *VpaTemplateSpec) { + *out = *in + if in.ResourcePolicy != nil { + in, out := &in.ResourcePolicy, &out.ResourcePolicy + *out = new(v1beta2.PodResourcePolicy) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VpaTemplateSpec. +func (in *VpaTemplateSpec) DeepCopy() *VpaTemplateSpec { + if in == nil { + return nil + } + out := new(VpaTemplateSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WeightBasedScalingInterval) DeepCopyInto(out *WeightBasedScalingInterval) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WeightBasedScalingInterval. +func (in *WeightBasedScalingInterval) DeepCopy() *WeightBasedScalingInterval { + if in == nil { + return nil + } + out := new(WeightBasedScalingInterval) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/go-logr/logr/README.md b/vendor/github.com/go-logr/logr/README.md index 26296d024..e9b5520a1 100644 --- a/vendor/github.com/go-logr/logr/README.md +++ b/vendor/github.com/go-logr/logr/README.md @@ -1,9 +1,9 @@ # A more minimal logging API for Go -Before you consider this package, please read [this blog post by the inimitable -Dave Cheney](http://dave.cheney.net/2015/11/05/lets-talk-about-logging). I -really appreciate what he has to say, and it largely aligns with my own -experiences. Too many choices of levels means inconsistent logs. +Before you consider this package, please read [this blog post by the +inimitable Dave Cheney][warning-makes-no-sense]. I really appreciate what +he has to say, and it largely aligns with my own experiences. Too many +choices of levels means inconsistent logs. This package offers a purely abstract interface, based on these ideas but with a few twists. Code can depend on just this interface and have the actual @@ -31,6 +31,153 @@ may feel very similar, but the primary difference is the lack of semantics. Because verbosity is a numerical value, it's safe to assume that an app running with higher verbosity means more (and less important) logs will be generated. -This is a BETA grade API. I have implemented it for -[glog](https://godoc.org/github.com/golang/glog). Until there is a significant -2nd implementation, I don't really know how it will change. +This is a BETA grade API. + +There are implementations for the following logging libraries: + +- **github.com/google/glog**: [glogr](https://github.com/go-logr/glogr) +- **k8s.io/klog**: [klogr](https://git.k8s.io/klog/klogr) +- **go.uber.org/zap**: [zapr](https://github.com/go-logr/zapr) +- **log** (the Go standard library logger): + [stdr](https://github.com/go-logr/stdr) +- **github.com/sirupsen/logrus**: [logrusr](https://github.com/bombsimon/logrusr) +- **github.com/wojas/genericr**: [genericr](https://github.com/wojas/genericr) (makes it easy to implement your own backend) +- **logfmt** (Heroku style [logging](https://www.brandur.org/logfmt)): [logfmtr](https://github.com/iand/logfmtr) + +# FAQ + +## Conceptual + +## Why structured logging? 
+ +- **Structured logs are more easily queriable**: Since you've got + key-value pairs, it's much easier to query your structured logs for + particular values by filtering on the contents of a particular key -- + think searching request logs for error codes, Kubernetes reconcilers for + the name and namespace of the reconciled object, etc + +- **Structured logging makes it easier to have cross-referencable logs**: + Similarly to searchability, if you maintain conventions around your + keys, it becomes easy to gather all log lines related to a particular + concept. + +- **Structured logs allow better dimensions of filtering**: if you have + structure to your logs, you've got more precise control over how much + information is logged -- you might choose in a particular configuration + to log certain keys but not others, only log lines where a certain key + matches a certain value, etc, instead of just having v-levels and names + to key off of. + +- **Structured logs better represent structured data**: sometimes, the + data that you want to log is inherently structured (think tuple-link + objects). Structured logs allow you to preserve that structure when + outputting. + +## Why V-levels? + +**V-levels give operators an easy way to control the chattiness of log +operations**. V-levels provide a way for a given package to distinguish +the relative importance or verbosity of a given log message. Then, if +a particular logger or package is logging too many messages, the user +of the package can simply change the v-levels for that library. + +## Why not more named levels, like Warning? + +Read [Dave Cheney's post][warning-makes-no-sense]. Then read [Differences +from Dave's ideas](#differences-from-daves-ideas). + +## Why not allow format strings, too? + +**Format strings negate many of the benefits of structured logs**: + +- They're not easily searchable without resorting to fuzzy searching, + regular expressions, etc + +- They don't store structured data well, since contents are flattened into + a string + +- They're not cross-referencable + +- They don't compress easily, since the message is not constant + +(unless you turn positional parameters into key-value pairs with numerical +keys, at which point you've gotten key-value logging with meaningless +keys) + +## Practical + +## Why key-value pairs, and not a map? + +Key-value pairs are *much* easier to optimize, especially around +allocations. Zap (a structured logger that inspired logr's interface) has +[performance measurements](https://github.com/uber-go/zap#performance) +that show this quite nicely. + +While the interface ends up being a little less obvious, you get +potentially better performance, plus avoid making users type +`map[string]string{}` every time they want to log. + +## What if my V-levels differ between libraries? + +That's fine. Control your V-levels on a per-logger basis, and use the +`WithName` function to pass different loggers to different libraries. + +Generally, you should take care to ensure that you have relatively +consistent V-levels within a given logger, however, as this makes deciding +on what verbosity of logs to request easier. + +## But I *really* want to use a format string! + +That's not actually a question. Assuming your question is "how do +I convert my mental model of logging with format strings to logging with +constant messages": + +1. figure out what the error actually is, as you'd write in a TL;DR style, + and use that as a message + +2. 
For every place you'd write a format specifier, look to the word before + it, and add that as a key value pair + +For instance, consider the following examples (all taken from spots in the +Kubernetes codebase): + +- `klog.V(4).Infof("Client is returning errors: code %v, error %v", + responseCode, err)` becomes `logger.Error(err, "client returned an + error", "code", responseCode)` + +- `klog.V(4).Infof("Got a Retry-After %ds response for attempt %d to %v", + seconds, retries, url)` becomes `logger.V(4).Info("got a retry-after + response when requesting url", "attempt", retries, "after + seconds", seconds, "url", url)` + +If you *really* must use a format string, place it as a key value, and +call `fmt.Sprintf` yourself -- for instance, `log.Printf("unable to +reflect over type %T")` becomes `logger.Info("unable to reflect over +type", "type", fmt.Sprintf("%T"))`. In general though, the cases where +this is necessary should be few and far between. + +## How do I choose my V-levels? + +This is basically the only hard constraint: increase V-levels to denote +more verbose or more debug-y logs. + +Otherwise, you can start out with `0` as "you always want to see this", +`1` as "common logging that you might *possibly* want to turn off", and +`10` as "I would like to performance-test your log collection stack". + +Then gradually choose levels in between as you need them, working your way +down from 10 (for debug and trace style logs) and up from 1 (for chattier +info-type logs). + +## How do I choose my keys + +- make your keys human-readable +- constant keys are generally a good idea +- be consistent across your codebase +- keys should naturally match parts of the message string + +While key names are mostly unrestricted (and spaces are acceptable), +it's generally a good idea to stick to printable ascii characters, or at +least match the general character set of your log lines. + +[warning-makes-no-sense]: http://dave.cheney.net/2015/11/05/lets-talk-about-logging diff --git a/vendor/github.com/go-logr/logr/discard.go b/vendor/github.com/go-logr/logr/discard.go new file mode 100644 index 000000000..267c796c6 --- /dev/null +++ b/vendor/github.com/go-logr/logr/discard.go @@ -0,0 +1,35 @@ +package logr + +// Discard returns a valid Logger that discards all messages logged to it. +// It can be used whenever the caller is not interested in the logs. +func Discard() Logger { + return discardLogger{} +} + +// discardLogger is a Logger that discards all messages. 
+type discardLogger struct{} + +func (l discardLogger) Enabled() bool { + return false +} + +func (l discardLogger) Info(msg string, keysAndValues ...interface{}) { +} + +func (l discardLogger) Error(err error, msg string, keysAndValues ...interface{}) { +} + +func (l discardLogger) V(level int) Logger { + return l +} + +func (l discardLogger) WithValues(keysAndValues ...interface{}) Logger { + return l +} + +func (l discardLogger) WithName(name string) Logger { + return l +} + +// Verify that it actually implements the interface +var _ Logger = discardLogger{} diff --git a/vendor/github.com/go-logr/logr/go.mod b/vendor/github.com/go-logr/logr/go.mod new file mode 100644 index 000000000..591884e91 --- /dev/null +++ b/vendor/github.com/go-logr/logr/go.mod @@ -0,0 +1,3 @@ +module github.com/go-logr/logr + +go 1.14 diff --git a/vendor/github.com/go-logr/logr/logr.go b/vendor/github.com/go-logr/logr/logr.go index ad72e7886..e86896c6c 100644 --- a/vendor/github.com/go-logr/logr/logr.go +++ b/vendor/github.com/go-logr/logr/logr.go @@ -1,3 +1,19 @@ +/* +Copyright 2019 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + // Package logr defines abstract interfaces for logging. Packages can depend on // these interfaces and callers can implement logging in whatever way is // appropriate. @@ -13,95 +29,131 @@ // // Usage // -// Logging is done using a Logger. Loggers can have name prefixes and named values -// attached, so that all log messages logged with that Logger have some base context -// associated. +// Logging is done using a Logger. Loggers can have name prefixes and named +// values attached, so that all log messages logged with that Logger have some +// base context associated. // -// The term "key" is used to refer to the name associated with a particular value, to -// disambiguate it from the general Logger name. +// The term "key" is used to refer to the name associated with a particular +// value, to disambiguate it from the general Logger name. // -// For instance, suppose we're trying to reconcile the state of an object, and we want -// to log that we've made some decision. +// For instance, suppose we're trying to reconcile the state of an object, and +// we want to log that we've made some decision. // -// With the traditional log package, we might write -// log.Printf( -// "decided to set field foo to value %q for object %s/%s", +// With the traditional log package, we might write: +// log.Printf("decided to set field foo to value %q for object %s/%s", // targetValue, object.Namespace, object.Name) // -// With logr's structured logging, we'd write -// // elsewhere in the file, set up the logger to log with the prefix of "reconcilers", -// // and the named value target-type=Foo, for extra context. -// log := mainLogger.WithName("reconcilers").WithValues("target-type", "Foo") +// With logr's structured logging, we'd write: +// // elsewhere in the file, set up the logger to log with the prefix of +// // "reconcilers", and the named value target-type=Foo, for extra context. 
+// log := mainLogger.WithName("reconcilers").WithValues("target-type", "Foo") // -// // later on... -// log.Info("setting field foo on object", "value", targetValue, "object", object) +// // later on... +// log.Info("setting foo on object", "value", targetValue, "object", object) // -// Depending on our logging implementation, we could then make logging decisions based on field values -// (like only logging such events for objects in a certain namespace), or copy the structured -// information into a structured log store. +// Depending on our logging implementation, we could then make logging decisions +// based on field values (like only logging such events for objects in a certain +// namespace), or copy the structured information into a structured log store. // -// For logging errors, Logger has a method called Error. Suppose we wanted to log an -// error while reconciling. With the traditional log package, we might write +// For logging errors, Logger has a method called Error. Suppose we wanted to +// log an error while reconciling. With the traditional log package, we might +// write: // log.Errorf("unable to reconcile object %s/%s: %v", object.Namespace, object.Name, err) // -// With logr, we'd instead write +// With logr, we'd instead write: // // assuming the above setup for log // log.Error(err, "unable to reconcile object", "object", object) // // This functions similarly to: // log.Info("unable to reconcile object", "error", err, "object", object) // -// However, it ensures that a standard key for the error value ("error") is used across all -// error logging. Furthermore, certain implementations may choose to attach additional -// information (such as stack traces) on calls to Error, so it's preferred to use Error -// to log errors. +// However, it ensures that a standard key for the error value ("error") is used +// across all error logging. Furthermore, certain implementations may choose to +// attach additional information (such as stack traces) on calls to Error, so +// it's preferred to use Error to log errors. // // Parts of a log line // // Each log message from a Logger has four types of context: // logger name, log verbosity, log message, and the named values. // -// The Logger name constists of a series of name "segments" added by successive calls to WithName. -// These name segments will be joined in some way by the underlying implementation. It is strongly -// reccomended that name segements contain simple identifiers (letters, digits, and hyphen), and do -// not contain characters that could muddle the log output or confuse the joining operation (e.g. -// whitespace, commas, periods, slashes, brackets, quotes, etc). +// The Logger name consists of a series of name "segments" added by successive +// calls to WithName. These name segments will be joined in some way by the +// underlying implementation. It is strongly recommended that name segments +// contain simple identifiers (letters, digits, and hyphen), and do not contain +// characters that could muddle the log output or confuse the joining operation +// (e.g. whitespace, commas, periods, slashes, brackets, quotes, etc). // -// Log verbosity represents how little a log matters. Level zero, the default, matters most. -// Increasing levels matter less and less. Try to avoid lots of different verbosity levels, -// and instead provide useful keys, logger names, and log messages for users to filter on. -// It's illegal to pass a log level below zero. +// Log verbosity represents how little a log matters. 
Level zero, the default, +// matters most. Increasing levels matter less and less. Try to avoid lots of +// different verbosity levels, and instead provide useful keys, logger names, +// and log messages for users to filter on. It's illegal to pass a log level +// below zero. // -// The log message consists of a constant message attached to the the log line. This -// should generally be a simple description of what's occuring, and should never be a format string. +// The log message consists of a constant message attached to the log line. +// This should generally be a simple description of what's occurring, and should +// never be a format string. // -// Variable information can then be attached using named values (key/value pairs). Keys are arbitrary -// strings, while values may be any Go value. +// Variable information can then be attached using named values (key/value +// pairs). Keys are arbitrary strings, while values may be any Go value. // // Key Naming Conventions // -// While users are generally free to use key names of their choice, it's generally best to avoid -// using the following keys, as they're frequently used by implementations: +// Keys are not strictly required to conform to any specification or regex, but +// it is recommended that they: +// * be human-readable and meaningful (not auto-generated or simple ordinals) +// * be constant (not dependent on input data) +// * contain only printable characters +// * not contain whitespace or punctuation +// +// These guidelines help ensure that log data is processed properly regardless +// of the log implementation. For example, log implementations will try to +// output JSON data or will store data for later database (e.g. SQL) queries. +// +// While users are generally free to use key names of their choice, it's +// generally best to avoid using the following keys, as they're frequently used +// by implementations: // -// - `"error"`: the underlying error value in the `Error` method. -// - `"stacktrace"`: the stack trace associated with a particular log line or error -// (often from the `Error` message). // - `"caller"`: the calling information (file/line) of a particular log line. -// - `"msg"`: the log message. +// - `"error"`: the underlying error value in the `Error` method. // - `"level"`: the log level. +// - `"logger"`: the name of the associated logger. +// - `"msg"`: the log message. +// - `"stacktrace"`: the stack trace associated with a particular log line or +// error (often from the `Error` message). // - `"ts"`: the timestamp for a log line. // -// Implementations are encouraged to make use of these keys to represent the above -// concepts, when neccessary (for example, in a pure-JSON output form, it would be -// necessary to represent at least message and timestamp as ordinary named values). +// Implementations are encouraged to make use of these keys to represent the +// above concepts, when necessary (for example, in a pure-JSON output form, it +// would be necessary to represent at least message and timestamp as ordinary +// named values). +// +// Implementations may choose to give callers access to the underlying +// logging implementation. The recommended pattern for this is: +// // Underlier exposes access to the underlying logging implementation. +// // Since callers only have a logr.Logger, they have to know which +// // implementation is in use, so this interface is less of an abstraction +// // and more of way to test type conversion. 
+// type Underlier interface { +// GetUnderlying() +// } package logr +import ( + "context" +) + // TODO: consider adding back in format strings if they're really needed // TODO: consider other bits of zap/zapcore functionality like ObjectMarshaller (for arbitrary objects) // TODO: consider other bits of glog functionality like Flush, InfoDepth, OutputStats -// InfoLogger represents the ability to log non-error messages, at a particular verbosity. -type InfoLogger interface { +// Logger represents the ability to log messages, both errors and not. +type Logger interface { + // Enabled tests whether this Logger is enabled. For example, commandline + // flags might be used to set the logging verbosity and disable some info + // logs. + Enabled() bool + // Info logs a non-error message with the given key/value pairs as context. // // The msg argument should be used to add some constant description to @@ -110,19 +162,6 @@ type InfoLogger interface { // keys and arbitrary values. Info(msg string, keysAndValues ...interface{}) - // Enabled tests whether this InfoLogger is enabled. For example, - // commandline flags might be used to set the logging verbosity and disable - // some info logs. - Enabled() bool -} - -// Logger represents the ability to log messages, both errors and not. -type Logger interface { - // All Loggers implement InfoLogger. Calling InfoLogger methods directly on - // a Logger value is equivalent to calling them on a V(0) InfoLogger. For - // example, logger.Info() produces the same result as logger.V(0).Info. - InfoLogger - // Error logs an error, with the given message and key/value pairs as context. // It functions similarly to calling Info with the "error" named value, but may // have unique behavior, and should be preferred for logging errors (see the @@ -133,10 +172,11 @@ type Logger interface { // triggered this log line, if present. Error(err error, msg string, keysAndValues ...interface{}) - // V returns an InfoLogger value for a specific verbosity level. A higher - // verbosity level means a log message is less important. It's illegal to - // pass a log level less than zero. - V(level int) InfoLogger + // V returns an Logger value for a specific verbosity level, relative to + // this Logger. In other words, V values are additive. V higher verbosity + // level means a log message is less important. It's illegal to pass a log + // level less than zero. + V(level int) Logger // WithValues adds some key-value pairs of context to a logger. // See Info for documentation on how key/value pairs work. @@ -144,8 +184,39 @@ type Logger interface { // WithName adds a new element to the logger's name. // Successive calls with WithName continue to append - // suffixes to the logger's name. It's strongly reccomended + // suffixes to the logger's name. It's strongly recommended // that name segments contain only letters, digits, and hyphens // (see the package documentation for more information). WithName(name string) Logger } + +// InfoLogger provides compatibility with code that relies on the v0.1.0 interface +// Deprecated: use Logger instead. This will be removed in a future release. +type InfoLogger = Logger + +type contextKey struct{} + +// FromContext returns a Logger constructed from ctx or nil if no +// logger details are found. 
+func FromContext(ctx context.Context) Logger { + if v, ok := ctx.Value(contextKey{}).(Logger); ok { + return v + } + + return nil +} + +// FromContextOrDiscard returns a Logger constructed from ctx or a Logger +// that discards all messages if no logger details are found. +func FromContextOrDiscard(ctx context.Context) Logger { + if v, ok := ctx.Value(contextKey{}).(Logger); ok { + return v + } + + return discardLogger{} +} + +// NewContext returns a new context derived from ctx that embeds the Logger. +func NewContext(ctx context.Context, l Logger) context.Context { + return context.WithValue(ctx, contextKey{}, l) +} diff --git a/vendor/github.com/go-logr/zapr/README.md b/vendor/github.com/go-logr/zapr/README.md index 8472875fa..548470ee1 100644 --- a/vendor/github.com/go-logr/zapr/README.md +++ b/vendor/github.com/go-logr/zapr/README.md @@ -2,7 +2,7 @@ Zapr :zap: ========== A [logr](https://github.com/go-logr/logr) implementation using -[Zap](go.uber.org/zap). +[Zap](https://github.com/uber-go/zap). Usage ----- @@ -13,7 +13,7 @@ import ( "go.uber.org/zap" "github.com/go-logr/logr" - "github.com/directxman12/zapr" + "github.com/go-logr/zapr" ) func main() { diff --git a/vendor/github.com/go-logr/zapr/go.mod b/vendor/github.com/go-logr/zapr/go.mod new file mode 100644 index 000000000..f7d7e256c --- /dev/null +++ b/vendor/github.com/go-logr/zapr/go.mod @@ -0,0 +1,10 @@ +module github.com/go-logr/zapr + +go 1.12 + +require ( + github.com/go-logr/logr v0.2.0 + go.uber.org/atomic v1.3.2 + go.uber.org/multierr v1.1.0 + go.uber.org/zap v1.8.0 +) diff --git a/vendor/github.com/go-logr/zapr/zapr.go b/vendor/github.com/go-logr/zapr/zapr.go index a9a10ae2e..09a074d07 100644 --- a/vendor/github.com/go-logr/zapr/zapr.go +++ b/vendor/github.com/go-logr/zapr/zapr.go @@ -1,3 +1,19 @@ +/* +Copyright 2019 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + // Copyright 2018 Solly Ross // // Licensed under the Apache License, Version 2.0 (the "License"); @@ -27,7 +43,7 @@ // For the most part, concepts in Zap correspond directly with those in // logr. // -// Unlike Zap, all fields *must* be in the form of suggared fields -- +// Unlike Zap, all fields *must* be in the form of sugared fields -- // it's illegal to pass a strongly-typed Zap field in a key position // to any of the log methods. // @@ -43,40 +59,18 @@ import ( "go.uber.org/zap/zapcore" ) -// noopInfoLogger is a logr.InfoLogger that's always disabled, and does nothing. -type noopInfoLogger struct{} - -func (l *noopInfoLogger) Enabled() bool { return false } -func (l *noopInfoLogger) Info(_ string, _ ...interface{}) {} - -var disabledInfoLogger = &noopInfoLogger{} - // NB: right now, we always use the equivalent of sugared logging. // This is necessary, since logr doesn't define non-suggared types, // and using zap-specific non-suggared types would make uses tied // directly to Zap. -// infoLogger is a logr.InfoLogger that uses Zap to log at a particular -// level. 
The level has already been converted to a Zap level, which -// is to say that `logrLevel = -1*zapLevel`. -type infoLogger struct { - lvl zapcore.Level - l *zap.Logger -} - -func (l *infoLogger) Enabled() bool { return true } -func (l *infoLogger) Info(msg string, keysAndVals ...interface{}) { - if checkedEntry := l.l.Check(l.lvl, msg); checkedEntry != nil { - checkedEntry.Write(handleFields(l.l, keysAndVals)...) - } -} - -// zapLogger is a logr.Logger that uses Zap to log. +// zapLogger is a logr.Logger that uses Zap to log. The level has already been +// converted to a Zap level, which is to say that `logrLevel = -1*zapLevel`. type zapLogger struct { // NB: this looks very similar to zap.SugaredLogger, but // deals with our desire to have multiple verbosity levels. - l *zap.Logger - infoLogger + l *zap.Logger + lvl zapcore.Level } // handleFields converts a bunch of arbitrary key-value pairs into Zap fields. It takes @@ -124,40 +118,50 @@ func handleFields(l *zap.Logger, args []interface{}, additional ...zap.Field) [] return append(fields, additional...) } -func (l *zapLogger) Error(err error, msg string, keysAndVals ...interface{}) { - if checkedEntry := l.l.Check(zap.ErrorLevel, msg); checkedEntry != nil { - checkedEntry.Write(handleFields(l.l, keysAndVals, zap.Error(err))...) +func (zl *zapLogger) Enabled() bool { + return zl.l.Core().Enabled(zl.lvl) +} + +func (zl *zapLogger) Info(msg string, keysAndVals ...interface{}) { + if checkedEntry := zl.l.Check(zl.lvl, msg); checkedEntry != nil { + checkedEntry.Write(handleFields(zl.l, keysAndVals)...) } } -func (l *zapLogger) V(level int) logr.InfoLogger { - lvl := zapcore.Level(-1 * level) - if l.l.Core().Enabled(lvl) { - return &infoLogger{ - lvl: lvl, - l: l.l, - } +func (zl *zapLogger) Error(err error, msg string, keysAndVals ...interface{}) { + if checkedEntry := zl.l.Check(zap.ErrorLevel, msg); checkedEntry != nil { + checkedEntry.Write(handleFields(zl.l, keysAndVals, zap.Error(err))...) } - return disabledInfoLogger } -func (l *zapLogger) WithValues(keysAndValues ...interface{}) logr.Logger { - newLogger := l.l.With(handleFields(l.l, keysAndValues)...) - return NewLogger(newLogger) +func (zl *zapLogger) V(level int) logr.Logger { + return &zapLogger{ + lvl: zl.lvl - zapcore.Level(level), + l: zl.l, + } } -func (l *zapLogger) WithName(name string) logr.Logger { - newLogger := l.l.Named(name) - return NewLogger(newLogger) +func (zl *zapLogger) WithValues(keysAndValues ...interface{}) logr.Logger { + newLogger := zl.l.With(handleFields(zl.l, keysAndValues)...) + return newLoggerWithExtraSkip(newLogger, 0) } -// NewLogger creates a new logr.Logger using the given Zap Logger to log. -func NewLogger(l *zap.Logger) logr.Logger { +func (zl *zapLogger) WithName(name string) logr.Logger { + newLogger := zl.l.Named(name) + return newLoggerWithExtraSkip(newLogger, 0) +} + +// newLoggerWithExtraSkip allows creation of loggers with variable levels of callstack skipping +func newLoggerWithExtraSkip(l *zap.Logger, callerSkip int) logr.Logger { + log := l.WithOptions(zap.AddCallerSkip(callerSkip)) return &zapLogger{ - l: l, - infoLogger: infoLogger{ - l: l, - lvl: zap.InfoLevel, - }, + l: log, + lvl: zap.InfoLevel, } } + +// NewLogger creates a new logr.Logger using the given Zap Logger to log. 
+func NewLogger(l *zap.Logger) logr.Logger { + // creates a new logger skipping one level of callstack + return newLoggerWithExtraSkip(l, 1) +} diff --git a/vendor/github.com/gogo/protobuf/gogoproto/Makefile b/vendor/github.com/gogo/protobuf/gogoproto/Makefile new file mode 100644 index 000000000..0b4659b73 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/Makefile @@ -0,0 +1,37 @@ +# Protocol Buffers for Go with Gadgets +# +# Copyright (c) 2013, The GoGo Authors. All rights reserved. +# http://github.com/gogo/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +regenerate: + go install github.com/gogo/protobuf/protoc-gen-gogo + protoc --gogo_out=Mgoogle/protobuf/descriptor.proto=github.com/gogo/protobuf/protoc-gen-gogo/descriptor:../../../../ --proto_path=../../../../:../protobuf/:. *.proto + +restore: + cp gogo.pb.golden gogo.pb.go + +preserve: + cp gogo.pb.go gogo.pb.golden diff --git a/vendor/github.com/gogo/protobuf/gogoproto/doc.go b/vendor/github.com/gogo/protobuf/gogoproto/doc.go new file mode 100644 index 000000000..081c86fa8 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/doc.go @@ -0,0 +1,169 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package gogoproto provides extensions for protocol buffers to achieve: + + - fast marshalling and unmarshalling. + - peace of mind by optionally generating test and benchmark code. + - more canonical Go structures. + - less typing by optionally generating extra helper code. + - goprotobuf compatibility + +More Canonical Go Structures + +A lot of time working with a goprotobuf struct will lead you to a place where you create another struct that is easier to work with and then have a function to copy the values between the two structs. +You might also find that basic structs that started their life as part of an API need to be sent over the wire. With gob, you could just send it. With goprotobuf, you need to make a parallel struct. +Gogoprotobuf tries to fix these problems with the nullable, embed, customtype and customname field extensions. + + - nullable, if false, a field is generated without a pointer (see warning below). + - embed, if true, the field is generated as an embedded field. + - customtype, It works with the Marshal and Unmarshal methods, to allow you to have your own types in your struct, but marshal to bytes. For example, custom.Uuid or custom.Fixed128 + - customname (beta), Changes the generated fieldname. This is especially useful when generated methods conflict with fieldnames. + - casttype (beta), Changes the generated fieldtype. All generated code assumes that this type is castable to the protocol buffer field type. It does not work for structs or enums. + - castkey (beta), Changes the generated fieldtype for a map key. All generated code assumes that this type is castable to the protocol buffer field type. Only supported on maps. + - castvalue (beta), Changes the generated fieldtype for a map value. All generated code assumes that this type is castable to the protocol buffer field type. Only supported on maps. + +Warning about nullable: According to the Protocol Buffer specification, you should be able to tell whether a field is set or unset. With the option nullable=false this feature is lost, since your non-nullable fields will always be set. It can be seen as a layer on top of Protocol Buffers, where before and after marshalling all non-nullable fields are set and they cannot be unset. + +Let us look at: + + github.com/gogo/protobuf/test/example/example.proto + +for a quicker overview. + +The following message: + + package test; + + import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + + message A { + optional string Description = 1 [(gogoproto.nullable) = false]; + optional int64 Number = 2 [(gogoproto.nullable) = false]; + optional bytes Id = 3 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uuid", (gogoproto.nullable) = false]; + } + +Will generate a go struct which looks a lot like this: + + type A struct { + Description string + Number int64 + Id github_com_gogo_protobuf_test_custom.Uuid + } + +You will see there are no pointers, since all fields are non-nullable. 
+You will also see a custom type which marshals to a string. +Be warned it is your responsibility to test your custom types thoroughly. +You should think of every possible empty and nil case for your marshaling, unmarshaling and size methods. + +Next we will embed the message A in message B. + + message B { + optional A A = 1 [(gogoproto.nullable) = false, (gogoproto.embed) = true]; + repeated bytes G = 2 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uint128", (gogoproto.nullable) = false]; + } + +See below that A is embedded in B. + + type B struct { + A + G []github_com_gogo_protobuf_test_custom.Uint128 + } + +Also see the repeated custom type. + + type Uint128 [2]uint64 + +Next we will create a custom name for one of our fields. + + message C { + optional int64 size = 1 [(gogoproto.customname) = "MySize"]; + } + +See below that the field's name is MySize and not Size. + + type C struct { + MySize *int64 + } + +The is useful when having a protocol buffer message with a field name which conflicts with a generated method. +As an example, having a field name size and using the sizer plugin to generate a Size method will cause a go compiler error. +Using customname you can fix this error without changing the field name. +This is typically useful when working with a protocol buffer that was designed before these methods and/or the go language were avialable. + +Gogoprotobuf also has some more subtle changes, these could be changed back: + + - the generated package name for imports do not have the extra /filename.pb, + but are actually the imports specified in the .proto file. + +Gogoprotobuf also has lost some features which should be brought back with time: + + - Marshalling and unmarshalling with reflect and without the unsafe package, + this requires work in pointer_reflect.go + +Why does nullable break protocol buffer specifications: + +The protocol buffer specification states, somewhere, that you should be able to tell whether a +field is set or unset. With the option nullable=false this feature is lost, +since your non-nullable fields will always be set. It can be seen as a layer on top of +protocol buffers, where before and after marshalling all non-nullable fields are set +and they cannot be unset. + +Goprotobuf Compatibility: + +Gogoprotobuf is compatible with Goprotobuf, because it is compatible with protocol buffers. +Gogoprotobuf generates the same code as goprotobuf if no extensions are used. +The enumprefix, getters and stringer extensions can be used to remove some of the unnecessary code generated by goprotobuf: + + - gogoproto_import, if false, the generated code imports github.com/golang/protobuf/proto instead of github.com/gogo/protobuf/proto. + - goproto_enum_prefix, if false, generates the enum constant names without the messagetype prefix + - goproto_enum_stringer (experimental), if false, the enum is generated without the default string method, this is useful for rather using enum_stringer, or allowing you to write your own string method. + - goproto_getters, if false, the message is generated without get methods, this is useful when you would rather want to use face + - goproto_stringer, if false, the message is generated without the default string method, this is useful for rather using stringer, or allowing you to write your own string method. 
+ - goproto_extensions_map (beta), if false, the extensions field is generated as type []byte instead of type map[int32]proto.Extension + - goproto_unrecognized (beta), if false, XXX_unrecognized field is not generated. This is useful in conjunction with gogoproto.nullable=false, to generate structures completely devoid of pointers and reduce GC pressure at the cost of losing information about unrecognized fields. + - goproto_registration (beta), if true, the generated files will register all messages and types against both gogo/protobuf and golang/protobuf. This is necessary when using third-party packages which read registrations from golang/protobuf (such as the grpc-gateway). + +Less Typing and Peace of Mind is explained in their specific plugin folders godoc: + + - github.com/gogo/protobuf/plugin/ + +If you do not use any of these extension the code that is generated +will be the same as if goprotobuf has generated it. + +The most complete way to see examples is to look at + + github.com/gogo/protobuf/test/thetest.proto + +Gogoprototest is a seperate project, +because we want to keep gogoprotobuf independent of goprotobuf, +but we still want to test it thoroughly. + +*/ +package gogoproto diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go new file mode 100644 index 000000000..1e91766ae --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go @@ -0,0 +1,874 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: gogo.proto + +package gogoproto + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +var E_GoprotoEnumPrefix = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62001, + Name: "gogoproto.goproto_enum_prefix", + Tag: "varint,62001,opt,name=goproto_enum_prefix", + Filename: "gogo.proto", +} + +var E_GoprotoEnumStringer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62021, + Name: "gogoproto.goproto_enum_stringer", + Tag: "varint,62021,opt,name=goproto_enum_stringer", + Filename: "gogo.proto", +} + +var E_EnumStringer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62022, + Name: "gogoproto.enum_stringer", + Tag: "varint,62022,opt,name=enum_stringer", + Filename: "gogo.proto", +} + +var E_EnumCustomname = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*string)(nil), + Field: 62023, + Name: "gogoproto.enum_customname", + Tag: "bytes,62023,opt,name=enum_customname", + Filename: "gogo.proto", +} + +var E_Enumdecl = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62024, + Name: "gogoproto.enumdecl", + Tag: "varint,62024,opt,name=enumdecl", + Filename: "gogo.proto", +} + +var E_EnumvalueCustomname = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumValueOptions)(nil), + ExtensionType: (*string)(nil), + Field: 66001, + Name: "gogoproto.enumvalue_customname", + Tag: "bytes,66001,opt,name=enumvalue_customname", + Filename: "gogo.proto", +} + +var E_GoprotoGettersAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63001, + Name: "gogoproto.goproto_getters_all", + Tag: "varint,63001,opt,name=goproto_getters_all", + Filename: "gogo.proto", +} + +var E_GoprotoEnumPrefixAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63002, + Name: "gogoproto.goproto_enum_prefix_all", + Tag: "varint,63002,opt,name=goproto_enum_prefix_all", + Filename: "gogo.proto", +} + +var E_GoprotoStringerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63003, + Name: "gogoproto.goproto_stringer_all", + Tag: "varint,63003,opt,name=goproto_stringer_all", + Filename: "gogo.proto", +} + +var E_VerboseEqualAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63004, + Name: "gogoproto.verbose_equal_all", + Tag: "varint,63004,opt,name=verbose_equal_all", + Filename: "gogo.proto", +} + +var E_FaceAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63005, + Name: "gogoproto.face_all", + Tag: "varint,63005,opt,name=face_all", + Filename: "gogo.proto", +} + +var E_GostringAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63006, + Name: "gogoproto.gostring_all", + Tag: "varint,63006,opt,name=gostring_all", + Filename: "gogo.proto", +} + +var E_PopulateAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63007, + Name: "gogoproto.populate_all", + Tag: "varint,63007,opt,name=populate_all", + Filename: "gogo.proto", +} + +var E_StringerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + 
ExtensionType: (*bool)(nil), + Field: 63008, + Name: "gogoproto.stringer_all", + Tag: "varint,63008,opt,name=stringer_all", + Filename: "gogo.proto", +} + +var E_OnlyoneAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63009, + Name: "gogoproto.onlyone_all", + Tag: "varint,63009,opt,name=onlyone_all", + Filename: "gogo.proto", +} + +var E_EqualAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63013, + Name: "gogoproto.equal_all", + Tag: "varint,63013,opt,name=equal_all", + Filename: "gogo.proto", +} + +var E_DescriptionAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63014, + Name: "gogoproto.description_all", + Tag: "varint,63014,opt,name=description_all", + Filename: "gogo.proto", +} + +var E_TestgenAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63015, + Name: "gogoproto.testgen_all", + Tag: "varint,63015,opt,name=testgen_all", + Filename: "gogo.proto", +} + +var E_BenchgenAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63016, + Name: "gogoproto.benchgen_all", + Tag: "varint,63016,opt,name=benchgen_all", + Filename: "gogo.proto", +} + +var E_MarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63017, + Name: "gogoproto.marshaler_all", + Tag: "varint,63017,opt,name=marshaler_all", + Filename: "gogo.proto", +} + +var E_UnmarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63018, + Name: "gogoproto.unmarshaler_all", + Tag: "varint,63018,opt,name=unmarshaler_all", + Filename: "gogo.proto", +} + +var E_StableMarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63019, + Name: "gogoproto.stable_marshaler_all", + Tag: "varint,63019,opt,name=stable_marshaler_all", + Filename: "gogo.proto", +} + +var E_SizerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63020, + Name: "gogoproto.sizer_all", + Tag: "varint,63020,opt,name=sizer_all", + Filename: "gogo.proto", +} + +var E_GoprotoEnumStringerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63021, + Name: "gogoproto.goproto_enum_stringer_all", + Tag: "varint,63021,opt,name=goproto_enum_stringer_all", + Filename: "gogo.proto", +} + +var E_EnumStringerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63022, + Name: "gogoproto.enum_stringer_all", + Tag: "varint,63022,opt,name=enum_stringer_all", + Filename: "gogo.proto", +} + +var E_UnsafeMarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63023, + Name: "gogoproto.unsafe_marshaler_all", + Tag: "varint,63023,opt,name=unsafe_marshaler_all", + Filename: "gogo.proto", +} + +var E_UnsafeUnmarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63024, + Name: "gogoproto.unsafe_unmarshaler_all", + Tag: "varint,63024,opt,name=unsafe_unmarshaler_all", + Filename: "gogo.proto", +} + +var E_GoprotoExtensionsMapAll = 
&proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63025, + Name: "gogoproto.goproto_extensions_map_all", + Tag: "varint,63025,opt,name=goproto_extensions_map_all", + Filename: "gogo.proto", +} + +var E_GoprotoUnrecognizedAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63026, + Name: "gogoproto.goproto_unrecognized_all", + Tag: "varint,63026,opt,name=goproto_unrecognized_all", + Filename: "gogo.proto", +} + +var E_GogoprotoImport = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63027, + Name: "gogoproto.gogoproto_import", + Tag: "varint,63027,opt,name=gogoproto_import", + Filename: "gogo.proto", +} + +var E_ProtosizerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63028, + Name: "gogoproto.protosizer_all", + Tag: "varint,63028,opt,name=protosizer_all", + Filename: "gogo.proto", +} + +var E_CompareAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63029, + Name: "gogoproto.compare_all", + Tag: "varint,63029,opt,name=compare_all", + Filename: "gogo.proto", +} + +var E_TypedeclAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63030, + Name: "gogoproto.typedecl_all", + Tag: "varint,63030,opt,name=typedecl_all", + Filename: "gogo.proto", +} + +var E_EnumdeclAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63031, + Name: "gogoproto.enumdecl_all", + Tag: "varint,63031,opt,name=enumdecl_all", + Filename: "gogo.proto", +} + +var E_GoprotoRegistration = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63032, + Name: "gogoproto.goproto_registration", + Tag: "varint,63032,opt,name=goproto_registration", + Filename: "gogo.proto", +} + +var E_MessagenameAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63033, + Name: "gogoproto.messagename_all", + Tag: "varint,63033,opt,name=messagename_all", + Filename: "gogo.proto", +} + +var E_GoprotoSizecacheAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63034, + Name: "gogoproto.goproto_sizecache_all", + Tag: "varint,63034,opt,name=goproto_sizecache_all", + Filename: "gogo.proto", +} + +var E_GoprotoUnkeyedAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63035, + Name: "gogoproto.goproto_unkeyed_all", + Tag: "varint,63035,opt,name=goproto_unkeyed_all", + Filename: "gogo.proto", +} + +var E_GoprotoGetters = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64001, + Name: "gogoproto.goproto_getters", + Tag: "varint,64001,opt,name=goproto_getters", + Filename: "gogo.proto", +} + +var E_GoprotoStringer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64003, + Name: "gogoproto.goproto_stringer", + Tag: "varint,64003,opt,name=goproto_stringer", + Filename: "gogo.proto", +} + +var E_VerboseEqual = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64004, + Name: 
"gogoproto.verbose_equal", + Tag: "varint,64004,opt,name=verbose_equal", + Filename: "gogo.proto", +} + +var E_Face = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64005, + Name: "gogoproto.face", + Tag: "varint,64005,opt,name=face", + Filename: "gogo.proto", +} + +var E_Gostring = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64006, + Name: "gogoproto.gostring", + Tag: "varint,64006,opt,name=gostring", + Filename: "gogo.proto", +} + +var E_Populate = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64007, + Name: "gogoproto.populate", + Tag: "varint,64007,opt,name=populate", + Filename: "gogo.proto", +} + +var E_Stringer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 67008, + Name: "gogoproto.stringer", + Tag: "varint,67008,opt,name=stringer", + Filename: "gogo.proto", +} + +var E_Onlyone = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64009, + Name: "gogoproto.onlyone", + Tag: "varint,64009,opt,name=onlyone", + Filename: "gogo.proto", +} + +var E_Equal = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64013, + Name: "gogoproto.equal", + Tag: "varint,64013,opt,name=equal", + Filename: "gogo.proto", +} + +var E_Description = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64014, + Name: "gogoproto.description", + Tag: "varint,64014,opt,name=description", + Filename: "gogo.proto", +} + +var E_Testgen = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64015, + Name: "gogoproto.testgen", + Tag: "varint,64015,opt,name=testgen", + Filename: "gogo.proto", +} + +var E_Benchgen = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64016, + Name: "gogoproto.benchgen", + Tag: "varint,64016,opt,name=benchgen", + Filename: "gogo.proto", +} + +var E_Marshaler = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64017, + Name: "gogoproto.marshaler", + Tag: "varint,64017,opt,name=marshaler", + Filename: "gogo.proto", +} + +var E_Unmarshaler = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64018, + Name: "gogoproto.unmarshaler", + Tag: "varint,64018,opt,name=unmarshaler", + Filename: "gogo.proto", +} + +var E_StableMarshaler = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64019, + Name: "gogoproto.stable_marshaler", + Tag: "varint,64019,opt,name=stable_marshaler", + Filename: "gogo.proto", +} + +var E_Sizer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64020, + Name: "gogoproto.sizer", + Tag: "varint,64020,opt,name=sizer", + Filename: "gogo.proto", +} + +var E_UnsafeMarshaler = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64023, + Name: "gogoproto.unsafe_marshaler", + Tag: "varint,64023,opt,name=unsafe_marshaler", + Filename: "gogo.proto", +} + +var E_UnsafeUnmarshaler = &proto.ExtensionDesc{ + 
ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64024, + Name: "gogoproto.unsafe_unmarshaler", + Tag: "varint,64024,opt,name=unsafe_unmarshaler", + Filename: "gogo.proto", +} + +var E_GoprotoExtensionsMap = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64025, + Name: "gogoproto.goproto_extensions_map", + Tag: "varint,64025,opt,name=goproto_extensions_map", + Filename: "gogo.proto", +} + +var E_GoprotoUnrecognized = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64026, + Name: "gogoproto.goproto_unrecognized", + Tag: "varint,64026,opt,name=goproto_unrecognized", + Filename: "gogo.proto", +} + +var E_Protosizer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64028, + Name: "gogoproto.protosizer", + Tag: "varint,64028,opt,name=protosizer", + Filename: "gogo.proto", +} + +var E_Compare = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64029, + Name: "gogoproto.compare", + Tag: "varint,64029,opt,name=compare", + Filename: "gogo.proto", +} + +var E_Typedecl = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64030, + Name: "gogoproto.typedecl", + Tag: "varint,64030,opt,name=typedecl", + Filename: "gogo.proto", +} + +var E_Messagename = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64033, + Name: "gogoproto.messagename", + Tag: "varint,64033,opt,name=messagename", + Filename: "gogo.proto", +} + +var E_GoprotoSizecache = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64034, + Name: "gogoproto.goproto_sizecache", + Tag: "varint,64034,opt,name=goproto_sizecache", + Filename: "gogo.proto", +} + +var E_GoprotoUnkeyed = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64035, + Name: "gogoproto.goproto_unkeyed", + Tag: "varint,64035,opt,name=goproto_unkeyed", + Filename: "gogo.proto", +} + +var E_Nullable = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65001, + Name: "gogoproto.nullable", + Tag: "varint,65001,opt,name=nullable", + Filename: "gogo.proto", +} + +var E_Embed = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65002, + Name: "gogoproto.embed", + Tag: "varint,65002,opt,name=embed", + Filename: "gogo.proto", +} + +var E_Customtype = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65003, + Name: "gogoproto.customtype", + Tag: "bytes,65003,opt,name=customtype", + Filename: "gogo.proto", +} + +var E_Customname = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65004, + Name: "gogoproto.customname", + Tag: "bytes,65004,opt,name=customname", + Filename: "gogo.proto", +} + +var E_Jsontag = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65005, + Name: "gogoproto.jsontag", + Tag: "bytes,65005,opt,name=jsontag", + Filename: "gogo.proto", +} + +var E_Moretags = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + 
ExtensionType: (*string)(nil), + Field: 65006, + Name: "gogoproto.moretags", + Tag: "bytes,65006,opt,name=moretags", + Filename: "gogo.proto", +} + +var E_Casttype = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65007, + Name: "gogoproto.casttype", + Tag: "bytes,65007,opt,name=casttype", + Filename: "gogo.proto", +} + +var E_Castkey = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65008, + Name: "gogoproto.castkey", + Tag: "bytes,65008,opt,name=castkey", + Filename: "gogo.proto", +} + +var E_Castvalue = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65009, + Name: "gogoproto.castvalue", + Tag: "bytes,65009,opt,name=castvalue", + Filename: "gogo.proto", +} + +var E_Stdtime = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65010, + Name: "gogoproto.stdtime", + Tag: "varint,65010,opt,name=stdtime", + Filename: "gogo.proto", +} + +var E_Stdduration = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65011, + Name: "gogoproto.stdduration", + Tag: "varint,65011,opt,name=stdduration", + Filename: "gogo.proto", +} + +var E_Wktpointer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65012, + Name: "gogoproto.wktpointer", + Tag: "varint,65012,opt,name=wktpointer", + Filename: "gogo.proto", +} + +func init() { + proto.RegisterExtension(E_GoprotoEnumPrefix) + proto.RegisterExtension(E_GoprotoEnumStringer) + proto.RegisterExtension(E_EnumStringer) + proto.RegisterExtension(E_EnumCustomname) + proto.RegisterExtension(E_Enumdecl) + proto.RegisterExtension(E_EnumvalueCustomname) + proto.RegisterExtension(E_GoprotoGettersAll) + proto.RegisterExtension(E_GoprotoEnumPrefixAll) + proto.RegisterExtension(E_GoprotoStringerAll) + proto.RegisterExtension(E_VerboseEqualAll) + proto.RegisterExtension(E_FaceAll) + proto.RegisterExtension(E_GostringAll) + proto.RegisterExtension(E_PopulateAll) + proto.RegisterExtension(E_StringerAll) + proto.RegisterExtension(E_OnlyoneAll) + proto.RegisterExtension(E_EqualAll) + proto.RegisterExtension(E_DescriptionAll) + proto.RegisterExtension(E_TestgenAll) + proto.RegisterExtension(E_BenchgenAll) + proto.RegisterExtension(E_MarshalerAll) + proto.RegisterExtension(E_UnmarshalerAll) + proto.RegisterExtension(E_StableMarshalerAll) + proto.RegisterExtension(E_SizerAll) + proto.RegisterExtension(E_GoprotoEnumStringerAll) + proto.RegisterExtension(E_EnumStringerAll) + proto.RegisterExtension(E_UnsafeMarshalerAll) + proto.RegisterExtension(E_UnsafeUnmarshalerAll) + proto.RegisterExtension(E_GoprotoExtensionsMapAll) + proto.RegisterExtension(E_GoprotoUnrecognizedAll) + proto.RegisterExtension(E_GogoprotoImport) + proto.RegisterExtension(E_ProtosizerAll) + proto.RegisterExtension(E_CompareAll) + proto.RegisterExtension(E_TypedeclAll) + proto.RegisterExtension(E_EnumdeclAll) + proto.RegisterExtension(E_GoprotoRegistration) + proto.RegisterExtension(E_MessagenameAll) + proto.RegisterExtension(E_GoprotoSizecacheAll) + proto.RegisterExtension(E_GoprotoUnkeyedAll) + proto.RegisterExtension(E_GoprotoGetters) + proto.RegisterExtension(E_GoprotoStringer) + proto.RegisterExtension(E_VerboseEqual) + proto.RegisterExtension(E_Face) + proto.RegisterExtension(E_Gostring) + proto.RegisterExtension(E_Populate) + 
proto.RegisterExtension(E_Stringer) + proto.RegisterExtension(E_Onlyone) + proto.RegisterExtension(E_Equal) + proto.RegisterExtension(E_Description) + proto.RegisterExtension(E_Testgen) + proto.RegisterExtension(E_Benchgen) + proto.RegisterExtension(E_Marshaler) + proto.RegisterExtension(E_Unmarshaler) + proto.RegisterExtension(E_StableMarshaler) + proto.RegisterExtension(E_Sizer) + proto.RegisterExtension(E_UnsafeMarshaler) + proto.RegisterExtension(E_UnsafeUnmarshaler) + proto.RegisterExtension(E_GoprotoExtensionsMap) + proto.RegisterExtension(E_GoprotoUnrecognized) + proto.RegisterExtension(E_Protosizer) + proto.RegisterExtension(E_Compare) + proto.RegisterExtension(E_Typedecl) + proto.RegisterExtension(E_Messagename) + proto.RegisterExtension(E_GoprotoSizecache) + proto.RegisterExtension(E_GoprotoUnkeyed) + proto.RegisterExtension(E_Nullable) + proto.RegisterExtension(E_Embed) + proto.RegisterExtension(E_Customtype) + proto.RegisterExtension(E_Customname) + proto.RegisterExtension(E_Jsontag) + proto.RegisterExtension(E_Moretags) + proto.RegisterExtension(E_Casttype) + proto.RegisterExtension(E_Castkey) + proto.RegisterExtension(E_Castvalue) + proto.RegisterExtension(E_Stdtime) + proto.RegisterExtension(E_Stdduration) + proto.RegisterExtension(E_Wktpointer) +} + +func init() { proto.RegisterFile("gogo.proto", fileDescriptor_592445b5231bc2b9) } + +var fileDescriptor_592445b5231bc2b9 = []byte{ + // 1328 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x98, 0x49, 0x6f, 0x1c, 0x45, + 0x14, 0x80, 0x85, 0x48, 0x64, 0x4f, 0x79, 0x8b, 0xc7, 0xc6, 0x84, 0x08, 0x44, 0xe0, 0xc4, 0xc9, + 0x3e, 0x45, 0x28, 0x65, 0x45, 0x96, 0x63, 0x39, 0x56, 0x10, 0x0e, 0xc6, 0x89, 0xc3, 0x76, 0x18, + 0xf5, 0xf4, 0x94, 0xdb, 0x8d, 0xbb, 0xbb, 0x9a, 0xee, 0xea, 0x10, 0xe7, 0x86, 0xc2, 0x22, 0x84, + 0xd8, 0x91, 0x20, 0x21, 0x09, 0x04, 0xc4, 0xbe, 0x86, 0x7d, 0xb9, 0x70, 0x61, 0xb9, 0xf2, 0x1f, + 0xb8, 0x00, 0x66, 0xf7, 0xcd, 0x17, 0xf4, 0xba, 0xdf, 0xeb, 0xa9, 0x69, 0x8f, 0x54, 0x35, 0xb7, + 0xf6, 0xb8, 0xbe, 0x6f, 0xaa, 0xdf, 0xeb, 0x7a, 0xef, 0x4d, 0x33, 0xe6, 0x49, 0x4f, 0x4e, 0xc6, + 0x89, 0x54, 0xb2, 0x5e, 0x83, 0xeb, 0xfc, 0x72, 0xdf, 0x7e, 0x4f, 0x4a, 0x2f, 0x10, 0x53, 0xf9, + 0x5f, 0xcd, 0x6c, 0x75, 0xaa, 0x25, 0x52, 0x37, 0xf1, 0x63, 0x25, 0x93, 0x62, 0x31, 0x3f, 0xc6, + 0xc6, 0x70, 0x71, 0x43, 0x44, 0x59, 0xd8, 0x88, 0x13, 0xb1, 0xea, 0x9f, 0xae, 0x5f, 0x3f, 0x59, + 0x90, 0x93, 0x44, 0x4e, 0xce, 0x47, 0x59, 0x78, 0x47, 0xac, 0x7c, 0x19, 0xa5, 0x7b, 0xaf, 0xfc, + 0x72, 0xf5, 0xfe, 0xab, 0x6e, 0xe9, 0x5f, 0x1e, 0x45, 0x14, 0xfe, 0xb7, 0x94, 0x83, 0x7c, 0x99, + 0x5d, 0xd3, 0xe1, 0x4b, 0x55, 0xe2, 0x47, 0x9e, 0x48, 0x0c, 0xc6, 0xef, 0xd1, 0x38, 0xa6, 0x19, + 0x8f, 0x23, 0xca, 0xe7, 0xd8, 0x50, 0x2f, 0xae, 0x1f, 0xd0, 0x35, 0x28, 0x74, 0xc9, 0x02, 0x1b, + 0xc9, 0x25, 0x6e, 0x96, 0x2a, 0x19, 0x46, 0x4e, 0x28, 0x0c, 0x9a, 0x1f, 0x73, 0x4d, 0x6d, 0x79, + 0x18, 0xb0, 0xb9, 0x92, 0xe2, 0x9c, 0xf5, 0xc3, 0x27, 0x2d, 0xe1, 0x06, 0x06, 0xc3, 0x4f, 0xb8, + 0x91, 0x72, 0x3d, 0x3f, 0xc9, 0xc6, 0xe1, 0xfa, 0x94, 0x13, 0x64, 0x42, 0xdf, 0xc9, 0x4d, 0x5d, + 0x3d, 0x27, 0x61, 0x19, 0xc9, 0x7e, 0x3e, 0xbb, 0x2b, 0xdf, 0xce, 0x58, 0x29, 0xd0, 0xf6, 0xa4, + 0x65, 0xd1, 0x13, 0x4a, 0x89, 0x24, 0x6d, 0x38, 0x41, 0xb7, 0xed, 0x1d, 0xf1, 0x83, 0xd2, 0x78, + 0x6e, 0xb3, 0x33, 0x8b, 0x0b, 0x05, 0x39, 0x1b, 0x04, 0x7c, 0x85, 0x5d, 0xdb, 0xe5, 0xa9, 0xb0, + 0x70, 0x9e, 0x47, 0xe7, 0xf8, 0x8e, 0x27, 0x03, 0xb4, 0x4b, 0x8c, 0x3e, 0x2f, 0x73, 0x69, 0xe1, + 0x7c, 0x19, 0x9d, 0x75, 
0x64, 0x29, 0xa5, 0x60, 0xbc, 0x8d, 0x8d, 0x9e, 0x12, 0x49, 0x53, 0xa6, + 0xa2, 0x21, 0x1e, 0xc8, 0x9c, 0xc0, 0x42, 0x77, 0x01, 0x75, 0x23, 0x08, 0xce, 0x03, 0x07, 0xae, + 0x83, 0xac, 0x7f, 0xd5, 0x71, 0x85, 0x85, 0xe2, 0x22, 0x2a, 0xfa, 0x60, 0x3d, 0xa0, 0xb3, 0x6c, + 0xd0, 0x93, 0xc5, 0x2d, 0x59, 0xe0, 0x97, 0x10, 0x1f, 0x20, 0x06, 0x15, 0xb1, 0x8c, 0xb3, 0xc0, + 0x51, 0x36, 0x3b, 0x78, 0x85, 0x14, 0xc4, 0xa0, 0xa2, 0x87, 0xb0, 0xbe, 0x4a, 0x8a, 0x54, 0x8b, + 0xe7, 0x0c, 0x1b, 0x90, 0x51, 0xb0, 0x21, 0x23, 0x9b, 0x4d, 0x5c, 0x46, 0x03, 0x43, 0x04, 0x04, + 0xd3, 0xac, 0x66, 0x9b, 0x88, 0x37, 0x36, 0xe9, 0x78, 0x50, 0x06, 0x16, 0xd8, 0x08, 0x15, 0x28, + 0x5f, 0x46, 0x16, 0x8a, 0x37, 0x51, 0x31, 0xac, 0x61, 0x78, 0x1b, 0x4a, 0xa4, 0xca, 0x13, 0x36, + 0x92, 0xb7, 0xe8, 0x36, 0x10, 0xc1, 0x50, 0x36, 0x45, 0xe4, 0xae, 0xd9, 0x19, 0xde, 0xa6, 0x50, + 0x12, 0x03, 0x8a, 0x39, 0x36, 0x14, 0x3a, 0x49, 0xba, 0xe6, 0x04, 0x56, 0xe9, 0x78, 0x07, 0x1d, + 0x83, 0x25, 0x84, 0x11, 0xc9, 0xa2, 0x5e, 0x34, 0xef, 0x52, 0x44, 0x34, 0x0c, 0x8f, 0x5e, 0xaa, + 0x9c, 0x66, 0x20, 0x1a, 0xbd, 0xd8, 0xde, 0xa3, 0xa3, 0x57, 0xb0, 0x8b, 0xba, 0x71, 0x9a, 0xd5, + 0x52, 0xff, 0x8c, 0x95, 0xe6, 0x7d, 0xca, 0x74, 0x0e, 0x00, 0x7c, 0x0f, 0xbb, 0xae, 0x6b, 0x9b, + 0xb0, 0x90, 0x7d, 0x80, 0xb2, 0x89, 0x2e, 0xad, 0x02, 0x4b, 0x42, 0xaf, 0xca, 0x0f, 0xa9, 0x24, + 0x88, 0x8a, 0x6b, 0x89, 0x8d, 0x67, 0x51, 0xea, 0xac, 0xf6, 0x16, 0xb5, 0x8f, 0x28, 0x6a, 0x05, + 0xdb, 0x11, 0xb5, 0x13, 0x6c, 0x02, 0x8d, 0xbd, 0xe5, 0xf5, 0x63, 0x2a, 0xac, 0x05, 0xbd, 0xd2, + 0x99, 0xdd, 0xfb, 0xd8, 0xbe, 0x32, 0x9c, 0xa7, 0x95, 0x88, 0x52, 0x60, 0x1a, 0xa1, 0x13, 0x5b, + 0x98, 0xaf, 0xa0, 0x99, 0x2a, 0xfe, 0x7c, 0x29, 0x58, 0x74, 0x62, 0x90, 0xdf, 0xcd, 0xf6, 0x92, + 0x3c, 0x8b, 0x12, 0xe1, 0x4a, 0x2f, 0xf2, 0xcf, 0x88, 0x96, 0x85, 0xfa, 0x93, 0x4a, 0xaa, 0x56, + 0x34, 0x1c, 0xcc, 0x47, 0xd9, 0x9e, 0x72, 0x56, 0x69, 0xf8, 0x61, 0x2c, 0x13, 0x65, 0x30, 0x7e, + 0x4a, 0x99, 0x2a, 0xb9, 0xa3, 0x39, 0xc6, 0xe7, 0xd9, 0x70, 0xfe, 0xa7, 0xed, 0x23, 0xf9, 0x19, + 0x8a, 0x86, 0xda, 0x14, 0x16, 0x0e, 0x57, 0x86, 0xb1, 0x93, 0xd8, 0xd4, 0xbf, 0xcf, 0xa9, 0x70, + 0x20, 0x82, 0x85, 0x43, 0x6d, 0xc4, 0x02, 0xba, 0xbd, 0x85, 0xe1, 0x0b, 0x2a, 0x1c, 0xc4, 0xa0, + 0x82, 0x06, 0x06, 0x0b, 0xc5, 0x97, 0xa4, 0x20, 0x06, 0x14, 0x77, 0xb6, 0x1b, 0x6d, 0x22, 0x3c, + 0x3f, 0x55, 0x89, 0x03, 0xab, 0x0d, 0xaa, 0xaf, 0x36, 0x3b, 0x87, 0xb0, 0x65, 0x0d, 0x85, 0x4a, + 0x14, 0x8a, 0x34, 0x75, 0x3c, 0x01, 0x13, 0x87, 0xc5, 0xc6, 0xbe, 0xa6, 0x4a, 0xa4, 0x61, 0xb0, + 0x37, 0x6d, 0x42, 0x84, 0xb0, 0xbb, 0x8e, 0xbb, 0x66, 0xa3, 0xfb, 0xa6, 0xb2, 0xb9, 0xe3, 0xc4, + 0x82, 0x53, 0x9b, 0x7f, 0xb2, 0x68, 0x5d, 0x6c, 0x58, 0x3d, 0x9d, 0xdf, 0x56, 0xe6, 0x9f, 0x95, + 0x82, 0x2c, 0x6a, 0xc8, 0x48, 0x65, 0x9e, 0xaa, 0xdf, 0xb8, 0xc3, 0xb5, 0x58, 0xdc, 0x17, 0xe9, + 0x1e, 0xda, 0xc2, 0xfb, 0xed, 0x1c, 0xa7, 0xf8, 0xed, 0xf0, 0x90, 0x77, 0x0e, 0x3d, 0x66, 0xd9, + 0xd9, 0xad, 0xf2, 0x39, 0xef, 0x98, 0x79, 0xf8, 0x11, 0x36, 0xd4, 0x31, 0xf0, 0x98, 0x55, 0x0f, + 0xa3, 0x6a, 0x50, 0x9f, 0x77, 0xf8, 0x01, 0xb6, 0x0b, 0x86, 0x17, 0x33, 0xfe, 0x08, 0xe2, 0xf9, + 0x72, 0x7e, 0x88, 0xf5, 0xd3, 0xd0, 0x62, 0x46, 0x1f, 0x45, 0xb4, 0x44, 0x00, 0xa7, 0x81, 0xc5, + 0x8c, 0x3f, 0x46, 0x38, 0x21, 0x80, 0xdb, 0x87, 0xf0, 0xbb, 0x27, 0x76, 0x61, 0xd3, 0xa1, 0xd8, + 0x4d, 0xb3, 0x3e, 0x9c, 0x54, 0xcc, 0xf4, 0xe3, 0xf8, 0xe5, 0x44, 0xf0, 0x5b, 0xd9, 0x6e, 0xcb, + 0x80, 0x3f, 0x89, 0x68, 0xb1, 0x9e, 0xcf, 0xb1, 0x01, 0x6d, 0x3a, 0x31, 0xe3, 0x4f, 0x21, 0xae, + 0x53, 0xb0, 0x75, 0x9c, 0x4e, 0xcc, 0x82, 0xa7, 
0x69, 0xeb, 0x48, 0x40, 0xd8, 0x68, 0x30, 0x31, + 0xd3, 0xcf, 0x50, 0xd4, 0x09, 0xe1, 0x33, 0xac, 0x56, 0x36, 0x1b, 0x33, 0xff, 0x2c, 0xf2, 0x6d, + 0x06, 0x22, 0xa0, 0x35, 0x3b, 0xb3, 0xe2, 0x39, 0x8a, 0x80, 0x46, 0xc1, 0x31, 0xaa, 0x0e, 0x30, + 0x66, 0xd3, 0xf3, 0x74, 0x8c, 0x2a, 0xf3, 0x0b, 0x64, 0x33, 0xaf, 0xf9, 0x66, 0xc5, 0x0b, 0x94, + 0xcd, 0x7c, 0x3d, 0x6c, 0xa3, 0x3a, 0x11, 0x98, 0x1d, 0x2f, 0xd2, 0x36, 0x2a, 0x03, 0x01, 0x5f, + 0x62, 0xf5, 0x9d, 0xd3, 0x80, 0xd9, 0xf7, 0x12, 0xfa, 0x46, 0x77, 0x0c, 0x03, 0xfc, 0x2e, 0x36, + 0xd1, 0x7d, 0x12, 0x30, 0x5b, 0xcf, 0x6d, 0x55, 0x7e, 0xbb, 0xe9, 0x83, 0x00, 0x3f, 0xd1, 0x6e, + 0x29, 0xfa, 0x14, 0x60, 0xd6, 0x9e, 0xdf, 0xea, 0x2c, 0xdc, 0xfa, 0x10, 0xc0, 0x67, 0x19, 0x6b, + 0x37, 0x60, 0xb3, 0xeb, 0x02, 0xba, 0x34, 0x08, 0x8e, 0x06, 0xf6, 0x5f, 0x33, 0x7f, 0x91, 0x8e, + 0x06, 0x12, 0x70, 0x34, 0xa8, 0xf5, 0x9a, 0xe9, 0x4b, 0x74, 0x34, 0x08, 0x81, 0x27, 0x5b, 0xeb, + 0x6e, 0x66, 0xc3, 0x65, 0x7a, 0xb2, 0x35, 0x8a, 0x1f, 0x63, 0xa3, 0x3b, 0x1a, 0xa2, 0x59, 0xf5, + 0x1a, 0xaa, 0xf6, 0x54, 0xfb, 0xa1, 0xde, 0xbc, 0xb0, 0x19, 0x9a, 0x6d, 0xaf, 0x57, 0x9a, 0x17, + 0xf6, 0x42, 0x3e, 0xcd, 0xfa, 0xa3, 0x2c, 0x08, 0xe0, 0xf0, 0xd4, 0x6f, 0xe8, 0xd2, 0x4d, 0x45, + 0xd0, 0x22, 0xc5, 0xaf, 0xdb, 0x18, 0x1d, 0x02, 0xf8, 0x01, 0xb6, 0x5b, 0x84, 0x4d, 0xd1, 0x32, + 0x91, 0xbf, 0x6d, 0x53, 0xc1, 0x84, 0xd5, 0x7c, 0x86, 0xb1, 0xe2, 0xd5, 0x08, 0x84, 0xd9, 0xc4, + 0xfe, 0xbe, 0x5d, 0xbc, 0xa5, 0xd1, 0x90, 0xb6, 0x20, 0x4f, 0x8a, 0x41, 0xb0, 0xd9, 0x29, 0xc8, + 0x33, 0x72, 0x90, 0xf5, 0xdd, 0x9f, 0xca, 0x48, 0x39, 0x9e, 0x89, 0xfe, 0x03, 0x69, 0x5a, 0x0f, + 0x01, 0x0b, 0x65, 0x22, 0x94, 0xe3, 0xa5, 0x26, 0xf6, 0x4f, 0x64, 0x4b, 0x00, 0x60, 0xd7, 0x49, + 0x95, 0xcd, 0x7d, 0xff, 0x45, 0x30, 0x01, 0xb0, 0x69, 0xb8, 0x5e, 0x17, 0x1b, 0x26, 0xf6, 0x6f, + 0xda, 0x34, 0xae, 0xe7, 0x87, 0x58, 0x0d, 0x2e, 0xf3, 0xb7, 0x4a, 0x26, 0xf8, 0x1f, 0x84, 0xdb, + 0x04, 0x7c, 0x73, 0xaa, 0x5a, 0xca, 0x37, 0x07, 0xfb, 0x5f, 0xcc, 0x34, 0xad, 0xe7, 0xb3, 0x6c, + 0x20, 0x55, 0xad, 0x56, 0x86, 0xf3, 0xa9, 0x01, 0xff, 0x6f, 0xbb, 0x7c, 0x65, 0x51, 0x32, 0x90, + 0xed, 0x07, 0xd7, 0x55, 0x2c, 0xfd, 0x48, 0x89, 0xc4, 0x64, 0xd8, 0x42, 0x83, 0x86, 0x1c, 0x9e, + 0x67, 0x63, 0xae, 0x0c, 0xab, 0xdc, 0x61, 0xb6, 0x20, 0x17, 0xe4, 0x52, 0x5e, 0x67, 0xee, 0xbd, + 0xd9, 0xf3, 0xd5, 0x5a, 0xd6, 0x9c, 0x74, 0x65, 0x38, 0x05, 0xbf, 0x3c, 0xda, 0x2f, 0x54, 0xcb, + 0xdf, 0x21, 0xff, 0x07, 0x00, 0x00, 0xff, 0xff, 0x9c, 0xaf, 0x70, 0x4e, 0x83, 0x15, 0x00, 0x00, +} diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden new file mode 100644 index 000000000..f6502e4b9 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden @@ -0,0 +1,45 @@ +// Code generated by protoc-gen-go. +// source: gogo.proto +// DO NOT EDIT! + +package gogoproto + +import proto "github.com/gogo/protobuf/proto" +import json "encoding/json" +import math "math" +import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" + +// Reference proto, json, and math imports to suppress error if they are not otherwise used. 
+var _ = proto.Marshal +var _ = &json.SyntaxError{} +var _ = math.Inf + +var E_Nullable = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 51235, + Name: "gogoproto.nullable", + Tag: "varint,51235,opt,name=nullable", +} + +var E_Embed = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 51236, + Name: "gogoproto.embed", + Tag: "varint,51236,opt,name=embed", +} + +var E_Customtype = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 51237, + Name: "gogoproto.customtype", + Tag: "bytes,51237,opt,name=customtype", +} + +func init() { + proto.RegisterExtension(E_Nullable) + proto.RegisterExtension(E_Embed) + proto.RegisterExtension(E_Customtype) +} diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto b/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto new file mode 100644 index 000000000..b80c85653 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto @@ -0,0 +1,144 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +syntax = "proto2"; +package gogoproto; + +import "google/protobuf/descriptor.proto"; + +option java_package = "com.google.protobuf"; +option java_outer_classname = "GoGoProtos"; +option go_package = "github.com/gogo/protobuf/gogoproto"; + +extend google.protobuf.EnumOptions { + optional bool goproto_enum_prefix = 62001; + optional bool goproto_enum_stringer = 62021; + optional bool enum_stringer = 62022; + optional string enum_customname = 62023; + optional bool enumdecl = 62024; +} + +extend google.protobuf.EnumValueOptions { + optional string enumvalue_customname = 66001; +} + +extend google.protobuf.FileOptions { + optional bool goproto_getters_all = 63001; + optional bool goproto_enum_prefix_all = 63002; + optional bool goproto_stringer_all = 63003; + optional bool verbose_equal_all = 63004; + optional bool face_all = 63005; + optional bool gostring_all = 63006; + optional bool populate_all = 63007; + optional bool stringer_all = 63008; + optional bool onlyone_all = 63009; + + optional bool equal_all = 63013; + optional bool description_all = 63014; + optional bool testgen_all = 63015; + optional bool benchgen_all = 63016; + optional bool marshaler_all = 63017; + optional bool unmarshaler_all = 63018; + optional bool stable_marshaler_all = 63019; + + optional bool sizer_all = 63020; + + optional bool goproto_enum_stringer_all = 63021; + optional bool enum_stringer_all = 63022; + + optional bool unsafe_marshaler_all = 63023; + optional bool unsafe_unmarshaler_all = 63024; + + optional bool goproto_extensions_map_all = 63025; + optional bool goproto_unrecognized_all = 63026; + optional bool gogoproto_import = 63027; + optional bool protosizer_all = 63028; + optional bool compare_all = 63029; + optional bool typedecl_all = 63030; + optional bool enumdecl_all = 63031; + + optional bool goproto_registration = 63032; + optional bool messagename_all = 63033; + + optional bool goproto_sizecache_all = 63034; + optional bool goproto_unkeyed_all = 63035; +} + +extend google.protobuf.MessageOptions { + optional bool goproto_getters = 64001; + optional bool goproto_stringer = 64003; + optional bool verbose_equal = 64004; + optional bool face = 64005; + optional bool gostring = 64006; + optional bool populate = 64007; + optional bool stringer = 67008; + optional bool onlyone = 64009; + + optional bool equal = 64013; + optional bool description = 64014; + optional bool testgen = 64015; + optional bool benchgen = 64016; + optional bool marshaler = 64017; + optional bool unmarshaler = 64018; + optional bool stable_marshaler = 64019; + + optional bool sizer = 64020; + + optional bool unsafe_marshaler = 64023; + optional bool unsafe_unmarshaler = 64024; + + optional bool goproto_extensions_map = 64025; + optional bool goproto_unrecognized = 64026; + + optional bool protosizer = 64028; + optional bool compare = 64029; + + optional bool typedecl = 64030; + + optional bool messagename = 64033; + + optional bool goproto_sizecache = 64034; + optional bool goproto_unkeyed = 64035; +} + +extend google.protobuf.FieldOptions { + optional bool nullable = 65001; + optional bool embed = 65002; + optional string customtype = 65003; + optional string customname = 65004; + optional string jsontag = 65005; + optional string moretags = 65006; + optional string casttype = 65007; + optional string castkey = 65008; + optional string castvalue = 65009; + + optional bool stdtime = 65010; + optional bool stdduration = 65011; + optional bool wktpointer = 65012; + +} diff --git 
a/vendor/github.com/gogo/protobuf/gogoproto/helper.go b/vendor/github.com/gogo/protobuf/gogoproto/helper.go new file mode 100644 index 000000000..390d4e4be --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/helper.go @@ -0,0 +1,415 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package gogoproto + +import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" +import proto "github.com/gogo/protobuf/proto" + +func IsEmbed(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Embed, false) +} + +func IsNullable(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Nullable, true) +} + +func IsStdTime(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Stdtime, false) +} + +func IsStdDuration(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Stdduration, false) +} + +func IsStdDouble(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.DoubleValue" +} + +func IsStdFloat(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.FloatValue" +} + +func IsStdInt64(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.Int64Value" +} + +func IsStdUInt64(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.UInt64Value" +} + +func IsStdInt32(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.Int32Value" +} + +func IsStdUInt32(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == 
".google.protobuf.UInt32Value" +} + +func IsStdBool(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.BoolValue" +} + +func IsStdString(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.StringValue" +} + +func IsStdBytes(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.BytesValue" +} + +func IsStdType(field *google_protobuf.FieldDescriptorProto) bool { + return (IsStdTime(field) || IsStdDuration(field) || + IsStdDouble(field) || IsStdFloat(field) || + IsStdInt64(field) || IsStdUInt64(field) || + IsStdInt32(field) || IsStdUInt32(field) || + IsStdBool(field) || + IsStdString(field) || IsStdBytes(field)) +} + +func IsWktPtr(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) +} + +func NeedsNilCheck(proto3 bool, field *google_protobuf.FieldDescriptorProto) bool { + nullable := IsNullable(field) + if field.IsMessage() || IsCustomType(field) { + return nullable + } + if proto3 { + return false + } + return nullable || *field.Type == google_protobuf.FieldDescriptorProto_TYPE_BYTES +} + +func IsCustomType(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCustomType(field) + if len(typ) > 0 { + return true + } + return false +} + +func IsCastType(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCastType(field) + if len(typ) > 0 { + return true + } + return false +} + +func IsCastKey(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCastKey(field) + if len(typ) > 0 { + return true + } + return false +} + +func IsCastValue(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCastValue(field) + if len(typ) > 0 { + return true + } + return false +} + +func HasEnumDecl(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_Enumdecl, proto.GetBoolExtension(file.Options, E_EnumdeclAll, true)) +} + +func HasTypeDecl(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Typedecl, proto.GetBoolExtension(file.Options, E_TypedeclAll, true)) +} + +func GetCustomType(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Customtype) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetCastType(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Casttype) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetCastKey(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Castkey) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetCastValue(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Castvalue) + if err == nil && 
v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func IsCustomName(field *google_protobuf.FieldDescriptorProto) bool { + name := GetCustomName(field) + if len(name) > 0 { + return true + } + return false +} + +func IsEnumCustomName(field *google_protobuf.EnumDescriptorProto) bool { + name := GetEnumCustomName(field) + if len(name) > 0 { + return true + } + return false +} + +func IsEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) bool { + name := GetEnumValueCustomName(field) + if len(name) > 0 { + return true + } + return false +} + +func GetCustomName(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Customname) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetEnumCustomName(field *google_protobuf.EnumDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_EnumCustomname) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_EnumvalueCustomname) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetJsonTag(field *google_protobuf.FieldDescriptorProto) *string { + if field == nil { + return nil + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Jsontag) + if err == nil && v.(*string) != nil { + return (v.(*string)) + } + } + return nil +} + +func GetMoreTags(field *google_protobuf.FieldDescriptorProto) *string { + if field == nil { + return nil + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Moretags) + if err == nil && v.(*string) != nil { + return (v.(*string)) + } + } + return nil +} + +type EnableFunc func(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool + +func EnabledGoEnumPrefix(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_GoprotoEnumPrefix, proto.GetBoolExtension(file.Options, E_GoprotoEnumPrefixAll, true)) +} + +func EnabledGoStringer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoStringer, proto.GetBoolExtension(file.Options, E_GoprotoStringerAll, true)) +} + +func HasGoGetters(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoGetters, proto.GetBoolExtension(file.Options, E_GoprotoGettersAll, true)) +} + +func IsUnion(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Onlyone, proto.GetBoolExtension(file.Options, E_OnlyoneAll, false)) +} + +func HasGoString(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Gostring, proto.GetBoolExtension(file.Options, E_GostringAll, false)) +} + +func HasEqual(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Equal, 
proto.GetBoolExtension(file.Options, E_EqualAll, false)) +} + +func HasVerboseEqual(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_VerboseEqual, proto.GetBoolExtension(file.Options, E_VerboseEqualAll, false)) +} + +func IsStringer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Stringer, proto.GetBoolExtension(file.Options, E_StringerAll, false)) +} + +func IsFace(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Face, proto.GetBoolExtension(file.Options, E_FaceAll, false)) +} + +func HasDescription(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Description, proto.GetBoolExtension(file.Options, E_DescriptionAll, false)) +} + +func HasPopulate(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Populate, proto.GetBoolExtension(file.Options, E_PopulateAll, false)) +} + +func HasTestGen(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Testgen, proto.GetBoolExtension(file.Options, E_TestgenAll, false)) +} + +func HasBenchGen(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Benchgen, proto.GetBoolExtension(file.Options, E_BenchgenAll, false)) +} + +func IsMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Marshaler, proto.GetBoolExtension(file.Options, E_MarshalerAll, false)) +} + +func IsUnmarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Unmarshaler, proto.GetBoolExtension(file.Options, E_UnmarshalerAll, false)) +} + +func IsStableMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_StableMarshaler, proto.GetBoolExtension(file.Options, E_StableMarshalerAll, false)) +} + +func IsSizer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Sizer, proto.GetBoolExtension(file.Options, E_SizerAll, false)) +} + +func IsProtoSizer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Protosizer, proto.GetBoolExtension(file.Options, E_ProtosizerAll, false)) +} + +func IsGoEnumStringer(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_GoprotoEnumStringer, proto.GetBoolExtension(file.Options, E_GoprotoEnumStringerAll, true)) +} + +func IsEnumStringer(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_EnumStringer, proto.GetBoolExtension(file.Options, E_EnumStringerAll, false)) +} + +func IsUnsafeMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return 
proto.GetBoolExtension(message.Options, E_UnsafeMarshaler, proto.GetBoolExtension(file.Options, E_UnsafeMarshalerAll, false)) +} + +func IsUnsafeUnmarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_UnsafeUnmarshaler, proto.GetBoolExtension(file.Options, E_UnsafeUnmarshalerAll, false)) +} + +func HasExtensionsMap(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoExtensionsMap, proto.GetBoolExtension(file.Options, E_GoprotoExtensionsMapAll, true)) +} + +func HasUnrecognized(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoUnrecognized, proto.GetBoolExtension(file.Options, E_GoprotoUnrecognizedAll, true)) +} + +func IsProto3(file *google_protobuf.FileDescriptorProto) bool { + return file.GetSyntax() == "proto3" +} + +func ImportsGoGoProto(file *google_protobuf.FileDescriptorProto) bool { + return proto.GetBoolExtension(file.Options, E_GogoprotoImport, true) +} + +func HasCompare(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Compare, proto.GetBoolExtension(file.Options, E_CompareAll, false)) +} + +func RegistersGolangProto(file *google_protobuf.FileDescriptorProto) bool { + return proto.GetBoolExtension(file.Options, E_GoprotoRegistration, false) +} + +func HasMessageName(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Messagename, proto.GetBoolExtension(file.Options, E_MessagenameAll, false)) +} + +func HasSizecache(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoSizecache, proto.GetBoolExtension(file.Options, E_GoprotoSizecacheAll, true)) +} + +func HasUnkeyed(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoUnkeyed, proto.GetBoolExtension(file.Options, E_GoprotoUnkeyedAll, true)) +} diff --git a/vendor/github.com/gogo/protobuf/jsonpb/jsonpb.go b/vendor/github.com/gogo/protobuf/jsonpb/jsonpb.go new file mode 100644 index 000000000..e8134ec8b --- /dev/null +++ b/vendor/github.com/gogo/protobuf/jsonpb/jsonpb.go @@ -0,0 +1,1435 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2015 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package jsonpb provides marshaling and unmarshaling between protocol buffers and JSON. +It follows the specification at https://developers.google.com/protocol-buffers/docs/proto3#json. + +This package produces a different output than the standard "encoding/json" package, +which does not operate correctly on protocol buffers. +*/ +package jsonpb + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "math" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "github.com/gogo/protobuf/proto" + "github.com/gogo/protobuf/types" +) + +const secondInNanos = int64(time.Second / time.Nanosecond) +const maxSecondsInDuration = 315576000000 + +// Marshaler is a configurable object for converting between +// protocol buffer objects and a JSON representation for them. +type Marshaler struct { + // Whether to render enum values as integers, as opposed to string values. + EnumsAsInts bool + + // Whether to render fields with zero values. + EmitDefaults bool + + // A string to indent each level by. The presence of this field will + // also cause a space to appear between the field separator and + // value, and for newlines to be appear between fields and array + // elements. + Indent string + + // Whether to use the original (.proto) name for fields. + OrigName bool + + // A custom URL resolver to use when marshaling Any messages to JSON. + // If unset, the default resolution strategy is to extract the + // fully-qualified type name from the type URL and pass that to + // proto.MessageType(string). + AnyResolver AnyResolver +} + +// AnyResolver takes a type URL, present in an Any message, and resolves it into +// an instance of the associated message. +type AnyResolver interface { + Resolve(typeUrl string) (proto.Message, error) +} + +func defaultResolveAny(typeUrl string) (proto.Message, error) { + // Only the part of typeUrl after the last slash is relevant. + mname := typeUrl + if slash := strings.LastIndex(mname, "/"); slash >= 0 { + mname = mname[slash+1:] + } + mt := proto.MessageType(mname) + if mt == nil { + return nil, fmt.Errorf("unknown message type %q", mname) + } + return reflect.New(mt.Elem()).Interface().(proto.Message), nil +} + +// JSONPBMarshaler is implemented by protobuf messages that customize the +// way they are marshaled to JSON. Messages that implement this should +// also implement JSONPBUnmarshaler so that the custom format can be +// parsed. 
+// +// The JSON marshaling must follow the proto to JSON specification: +// https://developers.google.com/protocol-buffers/docs/proto3#json +type JSONPBMarshaler interface { + MarshalJSONPB(*Marshaler) ([]byte, error) +} + +// JSONPBUnmarshaler is implemented by protobuf messages that customize +// the way they are unmarshaled from JSON. Messages that implement this +// should also implement JSONPBMarshaler so that the custom format can be +// produced. +// +// The JSON unmarshaling must follow the JSON to proto specification: +// https://developers.google.com/protocol-buffers/docs/proto3#json +type JSONPBUnmarshaler interface { + UnmarshalJSONPB(*Unmarshaler, []byte) error +} + +// Marshal marshals a protocol buffer into JSON. +func (m *Marshaler) Marshal(out io.Writer, pb proto.Message) error { + v := reflect.ValueOf(pb) + if pb == nil || (v.Kind() == reflect.Ptr && v.IsNil()) { + return errors.New("Marshal called with nil") + } + // Check for unset required fields first. + if err := checkRequiredFields(pb); err != nil { + return err + } + writer := &errWriter{writer: out} + return m.marshalObject(writer, pb, "", "") +} + +// MarshalToString converts a protocol buffer object to JSON string. +func (m *Marshaler) MarshalToString(pb proto.Message) (string, error) { + var buf bytes.Buffer + if err := m.Marshal(&buf, pb); err != nil { + return "", err + } + return buf.String(), nil +} + +type int32Slice []int32 + +var nonFinite = map[string]float64{ + `"NaN"`: math.NaN(), + `"Infinity"`: math.Inf(1), + `"-Infinity"`: math.Inf(-1), +} + +// For sorting extensions ids to ensure stable output. +func (s int32Slice) Len() int { return len(s) } +func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } +func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +type isWkt interface { + XXX_WellKnownType() string +} + +var ( + wktType = reflect.TypeOf((*isWkt)(nil)).Elem() + messageType = reflect.TypeOf((*proto.Message)(nil)).Elem() +) + +// marshalObject writes a struct to the Writer. +func (m *Marshaler) marshalObject(out *errWriter, v proto.Message, indent, typeURL string) error { + if jsm, ok := v.(JSONPBMarshaler); ok { + b, err := jsm.MarshalJSONPB(m) + if err != nil { + return err + } + if typeURL != "" { + // we are marshaling this object to an Any type + var js map[string]*json.RawMessage + if err = json.Unmarshal(b, &js); err != nil { + return fmt.Errorf("type %T produced invalid JSON: %v", v, err) + } + turl, err := json.Marshal(typeURL) + if err != nil { + return fmt.Errorf("failed to marshal type URL %q to JSON: %v", typeURL, err) + } + js["@type"] = (*json.RawMessage)(&turl) + if m.Indent != "" { + b, err = json.MarshalIndent(js, indent, m.Indent) + } else { + b, err = json.Marshal(js) + } + if err != nil { + return err + } + } + + out.write(string(b)) + return out.err + } + + s := reflect.ValueOf(v).Elem() + + // Handle well-known types. + if wkt, ok := v.(isWkt); ok { + switch wkt.XXX_WellKnownType() { + case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value", + "Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue": + // "Wrappers use the same representation in JSON + // as the wrapped primitive type, ..." + sprop := proto.GetProperties(s.Type()) + return m.marshalValue(out, sprop.Prop[0], s.Field(0), indent) + case "Any": + // Any is a bit more involved. 
+ return m.marshalAny(out, v, indent) + case "Duration": + s, ns := s.Field(0).Int(), s.Field(1).Int() + if s < -maxSecondsInDuration || s > maxSecondsInDuration { + return fmt.Errorf("seconds out of range %v", s) + } + if ns <= -secondInNanos || ns >= secondInNanos { + return fmt.Errorf("ns out of range (%v, %v)", -secondInNanos, secondInNanos) + } + if (s > 0 && ns < 0) || (s < 0 && ns > 0) { + return errors.New("signs of seconds and nanos do not match") + } + // Generated output always contains 0, 3, 6, or 9 fractional digits, + // depending on required precision, followed by the suffix "s". + f := "%d.%09d" + if ns < 0 { + ns = -ns + if s == 0 { + f = "-%d.%09d" + } + } + x := fmt.Sprintf(f, s, ns) + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + out.write(`"`) + out.write(x) + out.write(`s"`) + return out.err + case "Struct", "ListValue": + // Let marshalValue handle the `Struct.fields` map or the `ListValue.values` slice. + // TODO: pass the correct Properties if needed. + return m.marshalValue(out, &proto.Properties{}, s.Field(0), indent) + case "Timestamp": + // "RFC 3339, where generated output will always be Z-normalized + // and uses 0, 3, 6 or 9 fractional digits." + s, ns := s.Field(0).Int(), s.Field(1).Int() + if ns < 0 || ns >= secondInNanos { + return fmt.Errorf("ns out of range [0, %v)", secondInNanos) + } + t := time.Unix(s, ns).UTC() + // time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits). + x := t.Format("2006-01-02T15:04:05.000000000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + out.write(`"`) + out.write(x) + out.write(`Z"`) + return out.err + case "Value": + // Value has a single oneof. + kind := s.Field(0) + if kind.IsNil() { + // "absence of any variant indicates an error" + return errors.New("nil Value") + } + // oneof -> *T -> T -> T.F + x := kind.Elem().Elem().Field(0) + // TODO: pass the correct Properties if needed. + return m.marshalValue(out, &proto.Properties{}, x, indent) + } + } + + out.write("{") + if m.Indent != "" { + out.write("\n") + } + + firstField := true + + if typeURL != "" { + if err := m.marshalTypeURL(out, indent, typeURL); err != nil { + return err + } + firstField = false + } + + for i := 0; i < s.NumField(); i++ { + value := s.Field(i) + valueField := s.Type().Field(i) + if strings.HasPrefix(valueField.Name, "XXX_") { + continue + } + + //this is not a protobuf field + if valueField.Tag.Get("protobuf") == "" && valueField.Tag.Get("protobuf_oneof") == "" { + continue + } + + // IsNil will panic on most value kinds. + switch value.Kind() { + case reflect.Chan, reflect.Func, reflect.Interface: + if value.IsNil() { + continue + } + } + + if !m.EmitDefaults { + switch value.Kind() { + case reflect.Bool: + if !value.Bool() { + continue + } + case reflect.Int32, reflect.Int64: + if value.Int() == 0 { + continue + } + case reflect.Uint32, reflect.Uint64: + if value.Uint() == 0 { + continue + } + case reflect.Float32, reflect.Float64: + if value.Float() == 0 { + continue + } + case reflect.String: + if value.Len() == 0 { + continue + } + case reflect.Map, reflect.Ptr, reflect.Slice: + if value.IsNil() { + continue + } + } + } + + // Oneof fields need special handling. + if valueField.Tag.Get("protobuf_oneof") != "" { + // value is an interface containing &T{real_value}. 
+ sv := value.Elem().Elem() // interface -> *T -> T + value = sv.Field(0) + valueField = sv.Type().Field(0) + } + prop := jsonProperties(valueField, m.OrigName) + if !firstField { + m.writeSep(out) + } + // If the map value is a cast type, it may not implement proto.Message, therefore + // allow the struct tag to declare the underlying message type. Change the property + // of the child types, use CustomType as a passer. CastType currently property is + // not used in json encoding. + if value.Kind() == reflect.Map { + if tag := valueField.Tag.Get("protobuf"); tag != "" { + for _, v := range strings.Split(tag, ",") { + if !strings.HasPrefix(v, "castvaluetype=") { + continue + } + v = strings.TrimPrefix(v, "castvaluetype=") + prop.MapValProp.CustomType = v + break + } + } + } + if err := m.marshalField(out, prop, value, indent); err != nil { + return err + } + firstField = false + } + + // Handle proto2 extensions. + if ep, ok := v.(proto.Message); ok { + extensions := proto.RegisteredExtensions(v) + // Sort extensions for stable output. + ids := make([]int32, 0, len(extensions)) + for id, desc := range extensions { + if !proto.HasExtension(ep, desc) { + continue + } + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) + for _, id := range ids { + desc := extensions[id] + if desc == nil { + // unknown extension + continue + } + ext, extErr := proto.GetExtension(ep, desc) + if extErr != nil { + return extErr + } + value := reflect.ValueOf(ext) + var prop proto.Properties + prop.Parse(desc.Tag) + prop.JSONName = fmt.Sprintf("[%s]", desc.Name) + if !firstField { + m.writeSep(out) + } + if err := m.marshalField(out, &prop, value, indent); err != nil { + return err + } + firstField = false + } + + } + + if m.Indent != "" { + out.write("\n") + out.write(indent) + } + out.write("}") + return out.err +} + +func (m *Marshaler) writeSep(out *errWriter) { + if m.Indent != "" { + out.write(",\n") + } else { + out.write(",") + } +} + +func (m *Marshaler) marshalAny(out *errWriter, any proto.Message, indent string) error { + // "If the Any contains a value that has a special JSON mapping, + // it will be converted as follows: {"@type": xxx, "value": yyy}. + // Otherwise, the value will be converted into a JSON object, + // and the "@type" field will be inserted to indicate the actual data type." 
+ v := reflect.ValueOf(any).Elem() + turl := v.Field(0).String() + val := v.Field(1).Bytes() + + var msg proto.Message + var err error + if m.AnyResolver != nil { + msg, err = m.AnyResolver.Resolve(turl) + } else { + msg, err = defaultResolveAny(turl) + } + if err != nil { + return err + } + + if err := proto.Unmarshal(val, msg); err != nil { + return err + } + + if _, ok := msg.(isWkt); ok { + out.write("{") + if m.Indent != "" { + out.write("\n") + } + if err := m.marshalTypeURL(out, indent, turl); err != nil { + return err + } + m.writeSep(out) + if m.Indent != "" { + out.write(indent) + out.write(m.Indent) + out.write(`"value": `) + } else { + out.write(`"value":`) + } + if err := m.marshalObject(out, msg, indent+m.Indent, ""); err != nil { + return err + } + if m.Indent != "" { + out.write("\n") + out.write(indent) + } + out.write("}") + return out.err + } + + return m.marshalObject(out, msg, indent, turl) +} + +func (m *Marshaler) marshalTypeURL(out *errWriter, indent, typeURL string) error { + if m.Indent != "" { + out.write(indent) + out.write(m.Indent) + } + out.write(`"@type":`) + if m.Indent != "" { + out.write(" ") + } + b, err := json.Marshal(typeURL) + if err != nil { + return err + } + out.write(string(b)) + return out.err +} + +// marshalField writes field description and value to the Writer. +func (m *Marshaler) marshalField(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error { + if m.Indent != "" { + out.write(indent) + out.write(m.Indent) + } + out.write(`"`) + out.write(prop.JSONName) + out.write(`":`) + if m.Indent != "" { + out.write(" ") + } + if err := m.marshalValue(out, prop, v, indent); err != nil { + return err + } + return nil +} + +// marshalValue writes the value to the Writer. +func (m *Marshaler) marshalValue(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error { + + v = reflect.Indirect(v) + + // Handle nil pointer + if v.Kind() == reflect.Invalid { + out.write("null") + return out.err + } + + // Handle repeated elements. + if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 { + out.write("[") + comma := "" + for i := 0; i < v.Len(); i++ { + sliceVal := v.Index(i) + out.write(comma) + if m.Indent != "" { + out.write("\n") + out.write(indent) + out.write(m.Indent) + out.write(m.Indent) + } + if err := m.marshalValue(out, prop, sliceVal, indent+m.Indent); err != nil { + return err + } + comma = "," + } + if m.Indent != "" { + out.write("\n") + out.write(indent) + out.write(m.Indent) + } + out.write("]") + return out.err + } + + // Handle well-known types. + // Most are handled up in marshalObject (because 99% are messages). + if v.Type().Implements(wktType) { + wkt := v.Interface().(isWkt) + switch wkt.XXX_WellKnownType() { + case "NullValue": + out.write("null") + return out.err + } + } + + if t, ok := v.Interface().(time.Time); ok { + ts, err := types.TimestampProto(t) + if err != nil { + return err + } + return m.marshalValue(out, prop, reflect.ValueOf(ts), indent) + } + + if d, ok := v.Interface().(time.Duration); ok { + dur := types.DurationProto(d) + return m.marshalValue(out, prop, reflect.ValueOf(dur), indent) + } + + // Handle enumerations. + if !m.EnumsAsInts && prop.Enum != "" { + // Unknown enum values will are stringified by the proto library as their + // value. Such values should _not_ be quoted or they will be interpreted + // as an enum string instead of their value. 
+ enumStr := v.Interface().(fmt.Stringer).String() + var valStr string + if v.Kind() == reflect.Ptr { + valStr = strconv.Itoa(int(v.Elem().Int())) + } else { + valStr = strconv.Itoa(int(v.Int())) + } + + if m, ok := v.Interface().(interface { + MarshalJSON() ([]byte, error) + }); ok { + data, err := m.MarshalJSON() + if err != nil { + return err + } + enumStr = string(data) + enumStr, err = strconv.Unquote(enumStr) + if err != nil { + return err + } + } + + isKnownEnum := enumStr != valStr + + if isKnownEnum { + out.write(`"`) + } + out.write(enumStr) + if isKnownEnum { + out.write(`"`) + } + return out.err + } + + // Handle nested messages. + if v.Kind() == reflect.Struct { + i := v + if v.CanAddr() { + i = v.Addr() + } else { + i = reflect.New(v.Type()) + i.Elem().Set(v) + } + iface := i.Interface() + if iface == nil { + out.write(`null`) + return out.err + } + + if m, ok := v.Interface().(interface { + MarshalJSON() ([]byte, error) + }); ok { + data, err := m.MarshalJSON() + if err != nil { + return err + } + out.write(string(data)) + return nil + } + + pm, ok := iface.(proto.Message) + if !ok { + if prop.CustomType == "" { + return fmt.Errorf("%v does not implement proto.Message", v.Type()) + } + t := proto.MessageType(prop.CustomType) + if t == nil || !i.Type().ConvertibleTo(t) { + return fmt.Errorf("%v declared custom type %s but it is not convertible to %v", v.Type(), prop.CustomType, t) + } + pm = i.Convert(t).Interface().(proto.Message) + } + return m.marshalObject(out, pm, indent+m.Indent, "") + } + + // Handle maps. + // Since Go randomizes map iteration, we sort keys for stable output. + if v.Kind() == reflect.Map { + out.write(`{`) + keys := v.MapKeys() + sort.Sort(mapKeys(keys)) + for i, k := range keys { + if i > 0 { + out.write(`,`) + } + if m.Indent != "" { + out.write("\n") + out.write(indent) + out.write(m.Indent) + out.write(m.Indent) + } + + // TODO handle map key prop properly + b, err := json.Marshal(k.Interface()) + if err != nil { + return err + } + s := string(b) + + // If the JSON is not a string value, encode it again to make it one. + if !strings.HasPrefix(s, `"`) { + b, err := json.Marshal(s) + if err != nil { + return err + } + s = string(b) + } + + out.write(s) + out.write(`:`) + if m.Indent != "" { + out.write(` `) + } + + vprop := prop + if prop != nil && prop.MapValProp != nil { + vprop = prop.MapValProp + } + if err := m.marshalValue(out, vprop, v.MapIndex(k), indent+m.Indent); err != nil { + return err + } + } + if m.Indent != "" { + out.write("\n") + out.write(indent) + out.write(m.Indent) + } + out.write(`}`) + return out.err + } + + // Handle non-finite floats, e.g. NaN, Infinity and -Infinity. + if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { + f := v.Float() + var sval string + switch { + case math.IsInf(f, 1): + sval = `"Infinity"` + case math.IsInf(f, -1): + sval = `"-Infinity"` + case math.IsNaN(f): + sval = `"NaN"` + } + if sval != "" { + out.write(sval) + return out.err + } + } + + // Default handling defers to the encoding/json library. + b, err := json.Marshal(v.Interface()) + if err != nil { + return err + } + needToQuote := string(b[0]) != `"` && (v.Kind() == reflect.Int64 || v.Kind() == reflect.Uint64) + if needToQuote { + out.write(`"`) + } + out.write(string(b)) + if needToQuote { + out.write(`"`) + } + return out.err +} + +// Unmarshaler is a configurable object for converting from a JSON +// representation to a protocol buffer object. 
+type Unmarshaler struct { + // Whether to allow messages to contain unknown fields, as opposed to + // failing to unmarshal. + AllowUnknownFields bool + + // A custom URL resolver to use when unmarshaling Any messages from JSON. + // If unset, the default resolution strategy is to extract the + // fully-qualified type name from the type URL and pass that to + // proto.MessageType(string). + AnyResolver AnyResolver +} + +// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream. +// This function is lenient and will decode any options permutations of the +// related Marshaler. +func (u *Unmarshaler) UnmarshalNext(dec *json.Decoder, pb proto.Message) error { + inputValue := json.RawMessage{} + if err := dec.Decode(&inputValue); err != nil { + return err + } + if err := u.unmarshalValue(reflect.ValueOf(pb).Elem(), inputValue, nil); err != nil { + return err + } + return checkRequiredFields(pb) +} + +// Unmarshal unmarshals a JSON object stream into a protocol +// buffer. This function is lenient and will decode any options +// permutations of the related Marshaler. +func (u *Unmarshaler) Unmarshal(r io.Reader, pb proto.Message) error { + dec := json.NewDecoder(r) + return u.UnmarshalNext(dec, pb) +} + +// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream. +// This function is lenient and will decode any options permutations of the +// related Marshaler. +func UnmarshalNext(dec *json.Decoder, pb proto.Message) error { + return new(Unmarshaler).UnmarshalNext(dec, pb) +} + +// Unmarshal unmarshals a JSON object stream into a protocol +// buffer. This function is lenient and will decode any options +// permutations of the related Marshaler. +func Unmarshal(r io.Reader, pb proto.Message) error { + return new(Unmarshaler).Unmarshal(r, pb) +} + +// UnmarshalString will populate the fields of a protocol buffer based +// on a JSON string. This function is lenient and will decode any options +// permutations of the related Marshaler. +func UnmarshalString(str string, pb proto.Message) error { + return new(Unmarshaler).Unmarshal(strings.NewReader(str), pb) +} + +// unmarshalValue converts/copies a value into the target. +// prop may be nil. +func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMessage, prop *proto.Properties) error { + targetType := target.Type() + + // Allocate memory for pointer fields. + if targetType.Kind() == reflect.Ptr { + // If input value is "null" and target is a pointer type, then the field should be treated as not set + // UNLESS the target is structpb.Value, in which case it should be set to structpb.NullValue. + _, isJSONPBUnmarshaler := target.Interface().(JSONPBUnmarshaler) + if string(inputValue) == "null" && targetType != reflect.TypeOf(&types.Value{}) && !isJSONPBUnmarshaler { + return nil + } + target.Set(reflect.New(targetType.Elem())) + + return u.unmarshalValue(target.Elem(), inputValue, prop) + } + + if jsu, ok := target.Addr().Interface().(JSONPBUnmarshaler); ok { + return jsu.UnmarshalJSONPB(u, []byte(inputValue)) + } + + // Handle well-known types that are not pointers. + if w, ok := target.Addr().Interface().(isWkt); ok { + switch w.XXX_WellKnownType() { + case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value", + "Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue": + return u.unmarshalValue(target.Field(0), inputValue, prop) + case "Any": + // Use json.RawMessage pointer type instead of value to support pre-1.8 version. 
+ // 1.8 changed RawMessage.MarshalJSON from pointer type to value type, see + // https://github.com/golang/go/issues/14493 + var jsonFields map[string]*json.RawMessage + if err := json.Unmarshal(inputValue, &jsonFields); err != nil { + return err + } + + val, ok := jsonFields["@type"] + if !ok || val == nil { + return errors.New("Any JSON doesn't have '@type'") + } + + var turl string + if err := json.Unmarshal([]byte(*val), &turl); err != nil { + return fmt.Errorf("can't unmarshal Any's '@type': %q", *val) + } + target.Field(0).SetString(turl) + + var m proto.Message + var err error + if u.AnyResolver != nil { + m, err = u.AnyResolver.Resolve(turl) + } else { + m, err = defaultResolveAny(turl) + } + if err != nil { + return err + } + + if _, ok := m.(isWkt); ok { + val, ok := jsonFields["value"] + if !ok { + return errors.New("Any JSON doesn't have 'value'") + } + + if err = u.unmarshalValue(reflect.ValueOf(m).Elem(), *val, nil); err != nil { + return fmt.Errorf("can't unmarshal Any nested proto %T: %v", m, err) + } + } else { + delete(jsonFields, "@type") + nestedProto, uerr := json.Marshal(jsonFields) + if uerr != nil { + return fmt.Errorf("can't generate JSON for Any's nested proto to be unmarshaled: %v", uerr) + } + + if err = u.unmarshalValue(reflect.ValueOf(m).Elem(), nestedProto, nil); err != nil { + return fmt.Errorf("can't unmarshal Any nested proto %T: %v", m, err) + } + } + + b, err := proto.Marshal(m) + if err != nil { + return fmt.Errorf("can't marshal proto %T into Any.Value: %v", m, err) + } + target.Field(1).SetBytes(b) + + return nil + case "Duration": + unq, err := unquote(string(inputValue)) + if err != nil { + return err + } + + d, err := time.ParseDuration(unq) + if err != nil { + return fmt.Errorf("bad Duration: %v", err) + } + + ns := d.Nanoseconds() + s := ns / 1e9 + ns %= 1e9 + target.Field(0).SetInt(s) + target.Field(1).SetInt(ns) + return nil + case "Timestamp": + unq, err := unquote(string(inputValue)) + if err != nil { + return err + } + + t, err := time.Parse(time.RFC3339Nano, unq) + if err != nil { + return fmt.Errorf("bad Timestamp: %v", err) + } + + target.Field(0).SetInt(t.Unix()) + target.Field(1).SetInt(int64(t.Nanosecond())) + return nil + case "Struct": + var m map[string]json.RawMessage + if err := json.Unmarshal(inputValue, &m); err != nil { + return fmt.Errorf("bad StructValue: %v", err) + } + target.Field(0).Set(reflect.ValueOf(map[string]*types.Value{})) + for k, jv := range m { + pv := &types.Value{} + if err := u.unmarshalValue(reflect.ValueOf(pv).Elem(), jv, prop); err != nil { + return fmt.Errorf("bad value in StructValue for key %q: %v", k, err) + } + target.Field(0).SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(pv)) + } + return nil + case "ListValue": + var s []json.RawMessage + if err := json.Unmarshal(inputValue, &s); err != nil { + return fmt.Errorf("bad ListValue: %v", err) + } + + target.Field(0).Set(reflect.ValueOf(make([]*types.Value, len(s)))) + for i, sv := range s { + if err := u.unmarshalValue(target.Field(0).Index(i), sv, prop); err != nil { + return err + } + } + return nil + case "Value": + ivStr := string(inputValue) + if ivStr == "null" { + target.Field(0).Set(reflect.ValueOf(&types.Value_NullValue{})) + } else if v, err := strconv.ParseFloat(ivStr, 0); err == nil { + target.Field(0).Set(reflect.ValueOf(&types.Value_NumberValue{NumberValue: v})) + } else if v, err := unquote(ivStr); err == nil { + target.Field(0).Set(reflect.ValueOf(&types.Value_StringValue{StringValue: v})) + } else if v, err := 
strconv.ParseBool(ivStr); err == nil { + target.Field(0).Set(reflect.ValueOf(&types.Value_BoolValue{BoolValue: v})) + } else if err := json.Unmarshal(inputValue, &[]json.RawMessage{}); err == nil { + lv := &types.ListValue{} + target.Field(0).Set(reflect.ValueOf(&types.Value_ListValue{ListValue: lv})) + return u.unmarshalValue(reflect.ValueOf(lv).Elem(), inputValue, prop) + } else if err := json.Unmarshal(inputValue, &map[string]json.RawMessage{}); err == nil { + sv := &types.Struct{} + target.Field(0).Set(reflect.ValueOf(&types.Value_StructValue{StructValue: sv})) + return u.unmarshalValue(reflect.ValueOf(sv).Elem(), inputValue, prop) + } else { + return fmt.Errorf("unrecognized type for Value %q", ivStr) + } + return nil + } + } + + if t, ok := target.Addr().Interface().(*time.Time); ok { + ts := &types.Timestamp{} + if err := u.unmarshalValue(reflect.ValueOf(ts).Elem(), inputValue, prop); err != nil { + return err + } + tt, err := types.TimestampFromProto(ts) + if err != nil { + return err + } + *t = tt + return nil + } + + if d, ok := target.Addr().Interface().(*time.Duration); ok { + dur := &types.Duration{} + if err := u.unmarshalValue(reflect.ValueOf(dur).Elem(), inputValue, prop); err != nil { + return err + } + dd, err := types.DurationFromProto(dur) + if err != nil { + return err + } + *d = dd + return nil + } + + // Handle enums, which have an underlying type of int32, + // and may appear as strings. + // The case of an enum appearing as a number is handled + // at the bottom of this function. + if inputValue[0] == '"' && prop != nil && prop.Enum != "" { + vmap := proto.EnumValueMap(prop.Enum) + // Don't need to do unquoting; valid enum names + // are from a limited character set. + s := inputValue[1 : len(inputValue)-1] + n, ok := vmap[string(s)] + if !ok { + return fmt.Errorf("unknown value %q for enum %s", s, prop.Enum) + } + if target.Kind() == reflect.Ptr { // proto2 + target.Set(reflect.New(targetType.Elem())) + target = target.Elem() + } + if targetType.Kind() != reflect.Int32 { + return fmt.Errorf("invalid target %q for enum %s", targetType.Kind(), prop.Enum) + } + target.SetInt(int64(n)) + return nil + } + + if prop != nil && len(prop.CustomType) > 0 && target.CanAddr() { + if m, ok := target.Addr().Interface().(interface { + UnmarshalJSON([]byte) error + }); ok { + return json.Unmarshal(inputValue, m) + } + } + + // Handle nested messages. + if targetType.Kind() == reflect.Struct { + var jsonFields map[string]json.RawMessage + if err := json.Unmarshal(inputValue, &jsonFields); err != nil { + return err + } + + consumeField := func(prop *proto.Properties) (json.RawMessage, bool) { + // Be liberal in what names we accept; both orig_name and camelName are okay. + fieldNames := acceptedJSONFieldNames(prop) + + vOrig, okOrig := jsonFields[fieldNames.orig] + vCamel, okCamel := jsonFields[fieldNames.camel] + if !okOrig && !okCamel { + return nil, false + } + // If, for some reason, both are present in the data, favour the camelName. 
+ var raw json.RawMessage + if okOrig { + raw = vOrig + delete(jsonFields, fieldNames.orig) + } + if okCamel { + raw = vCamel + delete(jsonFields, fieldNames.camel) + } + return raw, true + } + + sprops := proto.GetProperties(targetType) + for i := 0; i < target.NumField(); i++ { + ft := target.Type().Field(i) + if strings.HasPrefix(ft.Name, "XXX_") { + continue + } + valueForField, ok := consumeField(sprops.Prop[i]) + if !ok { + continue + } + + if err := u.unmarshalValue(target.Field(i), valueForField, sprops.Prop[i]); err != nil { + return err + } + } + // Check for any oneof fields. + if len(jsonFields) > 0 { + for _, oop := range sprops.OneofTypes { + raw, ok := consumeField(oop.Prop) + if !ok { + continue + } + nv := reflect.New(oop.Type.Elem()) + target.Field(oop.Field).Set(nv) + if err := u.unmarshalValue(nv.Elem().Field(0), raw, oop.Prop); err != nil { + return err + } + } + } + // Handle proto2 extensions. + if len(jsonFields) > 0 { + if ep, ok := target.Addr().Interface().(proto.Message); ok { + for _, ext := range proto.RegisteredExtensions(ep) { + name := fmt.Sprintf("[%s]", ext.Name) + raw, ok := jsonFields[name] + if !ok { + continue + } + delete(jsonFields, name) + nv := reflect.New(reflect.TypeOf(ext.ExtensionType).Elem()) + if err := u.unmarshalValue(nv.Elem(), raw, nil); err != nil { + return err + } + if err := proto.SetExtension(ep, ext, nv.Interface()); err != nil { + return err + } + } + } + } + if !u.AllowUnknownFields && len(jsonFields) > 0 { + // Pick any field to be the scapegoat. + var f string + for fname := range jsonFields { + f = fname + break + } + return fmt.Errorf("unknown field %q in %v", f, targetType) + } + return nil + } + + // Handle arrays + if targetType.Kind() == reflect.Slice { + if targetType.Elem().Kind() == reflect.Uint8 { + outRef := reflect.New(targetType) + outVal := outRef.Interface() + //CustomType with underlying type []byte + if _, ok := outVal.(interface { + UnmarshalJSON([]byte) error + }); ok { + if err := json.Unmarshal(inputValue, outVal); err != nil { + return err + } + target.Set(outRef.Elem()) + return nil + } + // Special case for encoded bytes. Pre-go1.5 doesn't support unmarshalling + // strings into aliased []byte types. + // https://github.com/golang/go/commit/4302fd0409da5e4f1d71471a6770dacdc3301197 + // https://github.com/golang/go/commit/c60707b14d6be26bf4213114d13070bff00d0b0a + var out []byte + if err := json.Unmarshal(inputValue, &out); err != nil { + return err + } + target.SetBytes(out) + return nil + } + + var slc []json.RawMessage + if err := json.Unmarshal(inputValue, &slc); err != nil { + return err + } + if slc != nil { + l := len(slc) + target.Set(reflect.MakeSlice(targetType, l, l)) + for i := 0; i < l; i++ { + if err := u.unmarshalValue(target.Index(i), slc[i], prop); err != nil { + return err + } + } + } + return nil + } + + // Handle maps (whose keys are always strings) + if targetType.Kind() == reflect.Map { + var mp map[string]json.RawMessage + if err := json.Unmarshal(inputValue, &mp); err != nil { + return err + } + if mp != nil { + target.Set(reflect.MakeMap(targetType)) + for ks, raw := range mp { + // Unmarshal map key. The core json library already decoded the key into a + // string, so we handle that specially. Other types were quoted post-serialization. 
+ var k reflect.Value + if targetType.Key().Kind() == reflect.String { + k = reflect.ValueOf(ks) + } else { + k = reflect.New(targetType.Key()).Elem() + var kprop *proto.Properties + if prop != nil && prop.MapKeyProp != nil { + kprop = prop.MapKeyProp + } + if err := u.unmarshalValue(k, json.RawMessage(ks), kprop); err != nil { + return err + } + } + + if !k.Type().AssignableTo(targetType.Key()) { + k = k.Convert(targetType.Key()) + } + + // Unmarshal map value. + v := reflect.New(targetType.Elem()).Elem() + var vprop *proto.Properties + if prop != nil && prop.MapValProp != nil { + vprop = prop.MapValProp + } + if err := u.unmarshalValue(v, raw, vprop); err != nil { + return err + } + target.SetMapIndex(k, v) + } + } + return nil + } + + // Non-finite numbers can be encoded as strings. + isFloat := targetType.Kind() == reflect.Float32 || targetType.Kind() == reflect.Float64 + if isFloat { + if num, ok := nonFinite[string(inputValue)]; ok { + target.SetFloat(num) + return nil + } + } + + // integers & floats can be encoded as strings. In this case we drop + // the quotes and proceed as normal. + isNum := targetType.Kind() == reflect.Int64 || targetType.Kind() == reflect.Uint64 || + targetType.Kind() == reflect.Int32 || targetType.Kind() == reflect.Uint32 || + targetType.Kind() == reflect.Float32 || targetType.Kind() == reflect.Float64 + if isNum && strings.HasPrefix(string(inputValue), `"`) { + inputValue = inputValue[1 : len(inputValue)-1] + } + + // Use the encoding/json for parsing other value types. + return json.Unmarshal(inputValue, target.Addr().Interface()) +} + +func unquote(s string) (string, error) { + var ret string + err := json.Unmarshal([]byte(s), &ret) + return ret, err +} + +// jsonProperties returns parsed proto.Properties for the field and corrects JSONName attribute. +func jsonProperties(f reflect.StructField, origName bool) *proto.Properties { + var prop proto.Properties + prop.Init(f.Type, f.Name, f.Tag.Get("protobuf"), &f) + if origName || prop.JSONName == "" { + prop.JSONName = prop.OrigName + } + return &prop +} + +type fieldNames struct { + orig, camel string +} + +func acceptedJSONFieldNames(prop *proto.Properties) fieldNames { + opts := fieldNames{orig: prop.OrigName, camel: prop.OrigName} + if prop.JSONName != "" { + opts.camel = prop.JSONName + } + return opts +} + +// Writer wrapper inspired by https://blog.golang.org/errors-are-values +type errWriter struct { + writer io.Writer + err error +} + +func (w *errWriter) write(str string) { + if w.err != nil { + return + } + _, w.err = w.writer.Write([]byte(str)) +} + +// Map fields may have key types of non-float scalars, strings and enums. +// The easiest way to sort them in some deterministic order is to use fmt. +// If this turns out to be inefficient we can always consider other options, +// such as doing a Schwartzian transform. +// +// Numeric keys are sorted in numeric order per +// https://developers.google.com/protocol-buffers/docs/proto#maps. 
+type mapKeys []reflect.Value + +func (s mapKeys) Len() int { return len(s) } +func (s mapKeys) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s mapKeys) Less(i, j int) bool { + if k := s[i].Kind(); k == s[j].Kind() { + switch k { + case reflect.String: + return s[i].String() < s[j].String() + case reflect.Int32, reflect.Int64: + return s[i].Int() < s[j].Int() + case reflect.Uint32, reflect.Uint64: + return s[i].Uint() < s[j].Uint() + } + } + return fmt.Sprint(s[i].Interface()) < fmt.Sprint(s[j].Interface()) +} + +// checkRequiredFields returns an error if any required field in the given proto message is not set. +// This function is used by both Marshal and Unmarshal. While required fields only exist in a +// proto2 message, a proto3 message can contain proto2 message(s). +func checkRequiredFields(pb proto.Message) error { + // Most well-known type messages do not contain required fields. The "Any" type may contain + // a message that has required fields. + // + // When an Any message is being marshaled, the code will invoked proto.Unmarshal on Any.Value + // field in order to transform that into JSON, and that should have returned an error if a + // required field is not set in the embedded message. + // + // When an Any message is being unmarshaled, the code will have invoked proto.Marshal on the + // embedded message to store the serialized message in Any.Value field, and that should have + // returned an error if a required field is not set. + if _, ok := pb.(isWkt); ok { + return nil + } + + v := reflect.ValueOf(pb) + // Skip message if it is not a struct pointer. + if v.Kind() != reflect.Ptr { + return nil + } + v = v.Elem() + if v.Kind() != reflect.Struct { + return nil + } + + for i := 0; i < v.NumField(); i++ { + field := v.Field(i) + sfield := v.Type().Field(i) + + if sfield.PkgPath != "" { + // blank PkgPath means the field is exported; skip if not exported + continue + } + + if strings.HasPrefix(sfield.Name, "XXX_") { + continue + } + + // Oneof field is an interface implemented by wrapper structs containing the actual oneof + // field, i.e. an interface containing &T{real_value}. + if sfield.Tag.Get("protobuf_oneof") != "" { + if field.Kind() != reflect.Interface { + continue + } + v := field.Elem() + if v.Kind() != reflect.Ptr || v.IsNil() { + continue + } + v = v.Elem() + if v.Kind() != reflect.Struct || v.NumField() < 1 { + continue + } + field = v.Field(0) + sfield = v.Type().Field(0) + } + + protoTag := sfield.Tag.Get("protobuf") + if protoTag == "" { + continue + } + var prop proto.Properties + prop.Init(sfield.Type, sfield.Name, protoTag, &sfield) + + switch field.Kind() { + case reflect.Map: + if field.IsNil() { + continue + } + // Check each map value. + keys := field.MapKeys() + for _, k := range keys { + v := field.MapIndex(k) + if err := checkRequiredFieldsInValue(v); err != nil { + return err + } + } + case reflect.Slice: + // Handle non-repeated type, e.g. bytes. + if !prop.Repeated { + if prop.Required && field.IsNil() { + return fmt.Errorf("required field %q is not set", prop.Name) + } + continue + } + + // Handle repeated type. + if field.IsNil() { + continue + } + // Check each slice item. 
+ for i := 0; i < field.Len(); i++ { + v := field.Index(i) + if err := checkRequiredFieldsInValue(v); err != nil { + return err + } + } + case reflect.Ptr: + if field.IsNil() { + if prop.Required { + return fmt.Errorf("required field %q is not set", prop.Name) + } + continue + } + if err := checkRequiredFieldsInValue(field); err != nil { + return err + } + } + } + + // Handle proto2 extensions. + for _, ext := range proto.RegisteredExtensions(pb) { + if !proto.HasExtension(pb, ext) { + continue + } + ep, err := proto.GetExtension(pb, ext) + if err != nil { + return err + } + err = checkRequiredFieldsInValue(reflect.ValueOf(ep)) + if err != nil { + return err + } + } + + return nil +} + +func checkRequiredFieldsInValue(v reflect.Value) error { + if v.Type().Implements(messageType) { + return checkRequiredFields(v.Interface().(proto.Message)) + } + return nil +} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile new file mode 100644 index 000000000..3496dc99d --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile @@ -0,0 +1,36 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +regenerate: + go install github.com/gogo/protobuf/protoc-gen-gogo + go install github.com/gogo/protobuf/protoc-gen-gostring + protoc --gogo_out=. -I=../../protobuf/google/protobuf ../../protobuf/google/protobuf/descriptor.proto + protoc --gostring_out=. 
-I=../../protobuf/google/protobuf ../../protobuf/google/protobuf/descriptor.proto diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go new file mode 100644 index 000000000..a85bf1984 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go @@ -0,0 +1,118 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package descriptor provides functions for obtaining protocol buffer +// descriptors for generated Go types. +// +// These functions cannot go in package proto because they depend on the +// generated protobuf descriptor messages, which themselves depend on proto. +package descriptor + +import ( + "bytes" + "compress/gzip" + "fmt" + "io/ioutil" + + "github.com/gogo/protobuf/proto" +) + +// extractFile extracts a FileDescriptorProto from a gzip'd buffer. +func extractFile(gz []byte) (*FileDescriptorProto, error) { + r, err := gzip.NewReader(bytes.NewReader(gz)) + if err != nil { + return nil, fmt.Errorf("failed to open gzip reader: %v", err) + } + defer r.Close() + + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, fmt.Errorf("failed to uncompress descriptor: %v", err) + } + + fd := new(FileDescriptorProto) + if err := proto.Unmarshal(b, fd); err != nil { + return nil, fmt.Errorf("malformed FileDescriptorProto: %v", err) + } + + return fd, nil +} + +// Message is a proto.Message with a method to return its descriptor. +// +// Message types generated by the protocol compiler always satisfy +// the Message interface. +type Message interface { + proto.Message + Descriptor() ([]byte, []int) +} + +// ForMessage returns a FileDescriptorProto and a DescriptorProto from within it +// describing the given message. 
+func ForMessage(msg Message) (fd *FileDescriptorProto, md *DescriptorProto) { + gz, path := msg.Descriptor() + fd, err := extractFile(gz) + if err != nil { + panic(fmt.Sprintf("invalid FileDescriptorProto for %T: %v", msg, err)) + } + + md = fd.MessageType[path[0]] + for _, i := range path[1:] { + md = md.NestedType[i] + } + return fd, md +} + +// Is this field a scalar numeric type? +func (field *FieldDescriptorProto) IsScalar() bool { + if field.Type == nil { + return false + } + switch *field.Type { + case FieldDescriptorProto_TYPE_DOUBLE, + FieldDescriptorProto_TYPE_FLOAT, + FieldDescriptorProto_TYPE_INT64, + FieldDescriptorProto_TYPE_UINT64, + FieldDescriptorProto_TYPE_INT32, + FieldDescriptorProto_TYPE_FIXED64, + FieldDescriptorProto_TYPE_FIXED32, + FieldDescriptorProto_TYPE_BOOL, + FieldDescriptorProto_TYPE_UINT32, + FieldDescriptorProto_TYPE_ENUM, + FieldDescriptorProto_TYPE_SFIXED32, + FieldDescriptorProto_TYPE_SFIXED64, + FieldDescriptorProto_TYPE_SINT32, + FieldDescriptorProto_TYPE_SINT64: + return true + default: + return false + } +} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go new file mode 100644 index 000000000..18b2a3318 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go @@ -0,0 +1,2865 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: descriptor.proto + +package descriptor + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type FieldDescriptorProto_Type int32 + +const ( + // 0 is reserved for errors. + // Order is weird for historical reasons. + FieldDescriptorProto_TYPE_DOUBLE FieldDescriptorProto_Type = 1 + FieldDescriptorProto_TYPE_FLOAT FieldDescriptorProto_Type = 2 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if + // negative values are likely. + FieldDescriptorProto_TYPE_INT64 FieldDescriptorProto_Type = 3 + FieldDescriptorProto_TYPE_UINT64 FieldDescriptorProto_Type = 4 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if + // negative values are likely. + FieldDescriptorProto_TYPE_INT32 FieldDescriptorProto_Type = 5 + FieldDescriptorProto_TYPE_FIXED64 FieldDescriptorProto_Type = 6 + FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7 + FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8 + FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9 + // Tag-delimited aggregate. + // Group type is deprecated and not supported in proto3. However, Proto3 + // implementations should still be able to parse the group wire format and + // treat group fields as unknown fields. + FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10 + FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11 + // New in version 2. 
+ FieldDescriptorProto_TYPE_BYTES FieldDescriptorProto_Type = 12 + FieldDescriptorProto_TYPE_UINT32 FieldDescriptorProto_Type = 13 + FieldDescriptorProto_TYPE_ENUM FieldDescriptorProto_Type = 14 + FieldDescriptorProto_TYPE_SFIXED32 FieldDescriptorProto_Type = 15 + FieldDescriptorProto_TYPE_SFIXED64 FieldDescriptorProto_Type = 16 + FieldDescriptorProto_TYPE_SINT32 FieldDescriptorProto_Type = 17 + FieldDescriptorProto_TYPE_SINT64 FieldDescriptorProto_Type = 18 +) + +var FieldDescriptorProto_Type_name = map[int32]string{ + 1: "TYPE_DOUBLE", + 2: "TYPE_FLOAT", + 3: "TYPE_INT64", + 4: "TYPE_UINT64", + 5: "TYPE_INT32", + 6: "TYPE_FIXED64", + 7: "TYPE_FIXED32", + 8: "TYPE_BOOL", + 9: "TYPE_STRING", + 10: "TYPE_GROUP", + 11: "TYPE_MESSAGE", + 12: "TYPE_BYTES", + 13: "TYPE_UINT32", + 14: "TYPE_ENUM", + 15: "TYPE_SFIXED32", + 16: "TYPE_SFIXED64", + 17: "TYPE_SINT32", + 18: "TYPE_SINT64", +} + +var FieldDescriptorProto_Type_value = map[string]int32{ + "TYPE_DOUBLE": 1, + "TYPE_FLOAT": 2, + "TYPE_INT64": 3, + "TYPE_UINT64": 4, + "TYPE_INT32": 5, + "TYPE_FIXED64": 6, + "TYPE_FIXED32": 7, + "TYPE_BOOL": 8, + "TYPE_STRING": 9, + "TYPE_GROUP": 10, + "TYPE_MESSAGE": 11, + "TYPE_BYTES": 12, + "TYPE_UINT32": 13, + "TYPE_ENUM": 14, + "TYPE_SFIXED32": 15, + "TYPE_SFIXED64": 16, + "TYPE_SINT32": 17, + "TYPE_SINT64": 18, +} + +func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type { + p := new(FieldDescriptorProto_Type) + *p = x + return p +} + +func (x FieldDescriptorProto_Type) String() string { + return proto.EnumName(FieldDescriptorProto_Type_name, int32(x)) +} + +func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Type_value, data, "FieldDescriptorProto_Type") + if err != nil { + return err + } + *x = FieldDescriptorProto_Type(value) + return nil +} + +func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{4, 0} +} + +type FieldDescriptorProto_Label int32 + +const ( + // 0 is reserved for errors + FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1 + FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2 + FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3 +) + +var FieldDescriptorProto_Label_name = map[int32]string{ + 1: "LABEL_OPTIONAL", + 2: "LABEL_REQUIRED", + 3: "LABEL_REPEATED", +} + +var FieldDescriptorProto_Label_value = map[string]int32{ + "LABEL_OPTIONAL": 1, + "LABEL_REQUIRED": 2, + "LABEL_REPEATED": 3, +} + +func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label { + p := new(FieldDescriptorProto_Label) + *p = x + return p +} + +func (x FieldDescriptorProto_Label) String() string { + return proto.EnumName(FieldDescriptorProto_Label_name, int32(x)) +} + +func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Label_value, data, "FieldDescriptorProto_Label") + if err != nil { + return err + } + *x = FieldDescriptorProto_Label(value) + return nil +} + +func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{4, 1} +} + +// Generated classes can be optimized for speed or code size. +type FileOptions_OptimizeMode int32 + +const ( + FileOptions_SPEED FileOptions_OptimizeMode = 1 + // etc. 
+ FileOptions_CODE_SIZE FileOptions_OptimizeMode = 2 + FileOptions_LITE_RUNTIME FileOptions_OptimizeMode = 3 +) + +var FileOptions_OptimizeMode_name = map[int32]string{ + 1: "SPEED", + 2: "CODE_SIZE", + 3: "LITE_RUNTIME", +} + +var FileOptions_OptimizeMode_value = map[string]int32{ + "SPEED": 1, + "CODE_SIZE": 2, + "LITE_RUNTIME": 3, +} + +func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode { + p := new(FileOptions_OptimizeMode) + *p = x + return p +} + +func (x FileOptions_OptimizeMode) String() string { + return proto.EnumName(FileOptions_OptimizeMode_name, int32(x)) +} + +func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FileOptions_OptimizeMode_value, data, "FileOptions_OptimizeMode") + if err != nil { + return err + } + *x = FileOptions_OptimizeMode(value) + return nil +} + +func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{10, 0} +} + +type FieldOptions_CType int32 + +const ( + // Default mode. + FieldOptions_STRING FieldOptions_CType = 0 + FieldOptions_CORD FieldOptions_CType = 1 + FieldOptions_STRING_PIECE FieldOptions_CType = 2 +) + +var FieldOptions_CType_name = map[int32]string{ + 0: "STRING", + 1: "CORD", + 2: "STRING_PIECE", +} + +var FieldOptions_CType_value = map[string]int32{ + "STRING": 0, + "CORD": 1, + "STRING_PIECE": 2, +} + +func (x FieldOptions_CType) Enum() *FieldOptions_CType { + p := new(FieldOptions_CType) + *p = x + return p +} + +func (x FieldOptions_CType) String() string { + return proto.EnumName(FieldOptions_CType_name, int32(x)) +} + +func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldOptions_CType_value, data, "FieldOptions_CType") + if err != nil { + return err + } + *x = FieldOptions_CType(value) + return nil +} + +func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{12, 0} +} + +type FieldOptions_JSType int32 + +const ( + // Use the default type. + FieldOptions_JS_NORMAL FieldOptions_JSType = 0 + // Use JavaScript strings. + FieldOptions_JS_STRING FieldOptions_JSType = 1 + // Use JavaScript numbers. + FieldOptions_JS_NUMBER FieldOptions_JSType = 2 +) + +var FieldOptions_JSType_name = map[int32]string{ + 0: "JS_NORMAL", + 1: "JS_STRING", + 2: "JS_NUMBER", +} + +var FieldOptions_JSType_value = map[string]int32{ + "JS_NORMAL": 0, + "JS_STRING": 1, + "JS_NUMBER": 2, +} + +func (x FieldOptions_JSType) Enum() *FieldOptions_JSType { + p := new(FieldOptions_JSType) + *p = x + return p +} + +func (x FieldOptions_JSType) String() string { + return proto.EnumName(FieldOptions_JSType_name, int32(x)) +} + +func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldOptions_JSType_value, data, "FieldOptions_JSType") + if err != nil { + return err + } + *x = FieldOptions_JSType(value) + return nil +} + +func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{12, 1} +} + +// Is this method side-effect-free (or safe in HTTP parlance), or idempotent, +// or neither? HTTP based RPC implementation may choose GET verb for safe +// methods, and PUT verb for idempotent methods instead of the default POST. 
+type MethodOptions_IdempotencyLevel int32 + +const ( + MethodOptions_IDEMPOTENCY_UNKNOWN MethodOptions_IdempotencyLevel = 0 + MethodOptions_NO_SIDE_EFFECTS MethodOptions_IdempotencyLevel = 1 + MethodOptions_IDEMPOTENT MethodOptions_IdempotencyLevel = 2 +) + +var MethodOptions_IdempotencyLevel_name = map[int32]string{ + 0: "IDEMPOTENCY_UNKNOWN", + 1: "NO_SIDE_EFFECTS", + 2: "IDEMPOTENT", +} + +var MethodOptions_IdempotencyLevel_value = map[string]int32{ + "IDEMPOTENCY_UNKNOWN": 0, + "NO_SIDE_EFFECTS": 1, + "IDEMPOTENT": 2, +} + +func (x MethodOptions_IdempotencyLevel) Enum() *MethodOptions_IdempotencyLevel { + p := new(MethodOptions_IdempotencyLevel) + *p = x + return p +} + +func (x MethodOptions_IdempotencyLevel) String() string { + return proto.EnumName(MethodOptions_IdempotencyLevel_name, int32(x)) +} + +func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MethodOptions_IdempotencyLevel_value, data, "MethodOptions_IdempotencyLevel") + if err != nil { + return err + } + *x = MethodOptions_IdempotencyLevel(value) + return nil +} + +func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{17, 0} +} + +// The protocol compiler can output a FileDescriptorSet containing the .proto +// files it parses. +type FileDescriptorSet struct { + File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileDescriptorSet) Reset() { *m = FileDescriptorSet{} } +func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorSet) ProtoMessage() {} +func (*FileDescriptorSet) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{0} +} +func (m *FileDescriptorSet) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileDescriptorSet.Unmarshal(m, b) +} +func (m *FileDescriptorSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileDescriptorSet.Marshal(b, m, deterministic) +} +func (m *FileDescriptorSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileDescriptorSet.Merge(m, src) +} +func (m *FileDescriptorSet) XXX_Size() int { + return xxx_messageInfo_FileDescriptorSet.Size(m) +} +func (m *FileDescriptorSet) XXX_DiscardUnknown() { + xxx_messageInfo_FileDescriptorSet.DiscardUnknown(m) +} + +var xxx_messageInfo_FileDescriptorSet proto.InternalMessageInfo + +func (m *FileDescriptorSet) GetFile() []*FileDescriptorProto { + if m != nil { + return m.File + } + return nil +} + +// Describes a complete .proto file. +type FileDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` + // Names of files imported by this file. + Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"` + // Indexes of the public imported files in the dependency list above. + PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency,json=publicDependency" json:"public_dependency,omitempty"` + // Indexes of the weak imported files in the dependency list. + // For Google-internal migration only. Do not use. + WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"` + // All top-level definitions in this file. 
+ MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + Service []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"` + Options *FileOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + // This field contains optional information about the original source code. + // You may safely remove this entire field without harming runtime + // functionality of the descriptors -- the information is needed only by + // development tools. + SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"` + // The syntax of the proto file. + // The supported values are "proto2" and "proto3". + Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileDescriptorProto) Reset() { *m = FileDescriptorProto{} } +func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorProto) ProtoMessage() {} +func (*FileDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{1} +} +func (m *FileDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileDescriptorProto.Unmarshal(m, b) +} +func (m *FileDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileDescriptorProto.Marshal(b, m, deterministic) +} +func (m *FileDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileDescriptorProto.Merge(m, src) +} +func (m *FileDescriptorProto) XXX_Size() int { + return xxx_messageInfo_FileDescriptorProto.Size(m) +} +func (m *FileDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_FileDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_FileDescriptorProto proto.InternalMessageInfo + +func (m *FileDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FileDescriptorProto) GetPackage() string { + if m != nil && m.Package != nil { + return *m.Package + } + return "" +} + +func (m *FileDescriptorProto) GetDependency() []string { + if m != nil { + return m.Dependency + } + return nil +} + +func (m *FileDescriptorProto) GetPublicDependency() []int32 { + if m != nil { + return m.PublicDependency + } + return nil +} + +func (m *FileDescriptorProto) GetWeakDependency() []int32 { + if m != nil { + return m.WeakDependency + } + return nil +} + +func (m *FileDescriptorProto) GetMessageType() []*DescriptorProto { + if m != nil { + return m.MessageType + } + return nil +} + +func (m *FileDescriptorProto) GetEnumType() []*EnumDescriptorProto { + if m != nil { + return m.EnumType + } + return nil +} + +func (m *FileDescriptorProto) GetService() []*ServiceDescriptorProto { + if m != nil { + return m.Service + } + return nil +} + +func (m *FileDescriptorProto) GetExtension() []*FieldDescriptorProto { + if m != nil { + return m.Extension + } + return nil +} + +func (m *FileDescriptorProto) GetOptions() *FileOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *FileDescriptorProto) GetSourceCodeInfo() *SourceCodeInfo { + if m != nil { + 
return m.SourceCodeInfo + } + return nil +} + +func (m *FileDescriptorProto) GetSyntax() string { + if m != nil && m.Syntax != nil { + return *m.Syntax + } + return "" +} + +// Describes a message type. +type DescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"` + NestedType []*DescriptorProto `protobuf:"bytes,3,rep,name=nested_type,json=nestedType" json:"nested_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,4,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range,json=extensionRange" json:"extension_range,omitempty"` + OneofDecl []*OneofDescriptorProto `protobuf:"bytes,8,rep,name=oneof_decl,json=oneofDecl" json:"oneof_decl,omitempty"` + Options *MessageOptions `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"` + ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved field names, which may not be used by fields in the same message. + // A given name may only be reserved once. + ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DescriptorProto) Reset() { *m = DescriptorProto{} } +func (m *DescriptorProto) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto) ProtoMessage() {} +func (*DescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{2} +} +func (m *DescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto.Unmarshal(m, b) +} +func (m *DescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto.Marshal(b, m, deterministic) +} +func (m *DescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto.Merge(m, src) +} +func (m *DescriptorProto) XXX_Size() int { + return xxx_messageInfo_DescriptorProto.Size(m) +} +func (m *DescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_DescriptorProto proto.InternalMessageInfo + +func (m *DescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *DescriptorProto) GetField() []*FieldDescriptorProto { + if m != nil { + return m.Field + } + return nil +} + +func (m *DescriptorProto) GetExtension() []*FieldDescriptorProto { + if m != nil { + return m.Extension + } + return nil +} + +func (m *DescriptorProto) GetNestedType() []*DescriptorProto { + if m != nil { + return m.NestedType + } + return nil +} + +func (m *DescriptorProto) GetEnumType() []*EnumDescriptorProto { + if m != nil { + return m.EnumType + } + return nil +} + +func (m *DescriptorProto) GetExtensionRange() []*DescriptorProto_ExtensionRange { + if m != nil { + return m.ExtensionRange + } + return nil +} + +func (m *DescriptorProto) GetOneofDecl() []*OneofDescriptorProto { + if m != nil { + return m.OneofDecl + } + return nil +} + +func (m *DescriptorProto) GetOptions() *MessageOptions { + if m != nil { + return m.Options + } + return nil +} + 
+func (m *DescriptorProto) GetReservedRange() []*DescriptorProto_ReservedRange { + if m != nil { + return m.ReservedRange + } + return nil +} + +func (m *DescriptorProto) GetReservedName() []string { + if m != nil { + return m.ReservedName + } + return nil +} + +type DescriptorProto_ExtensionRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DescriptorProto_ExtensionRange) Reset() { *m = DescriptorProto_ExtensionRange{} } +func (m *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto_ExtensionRange) ProtoMessage() {} +func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{2, 0} +} +func (m *DescriptorProto_ExtensionRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Unmarshal(m, b) +} +func (m *DescriptorProto_ExtensionRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Marshal(b, m, deterministic) +} +func (m *DescriptorProto_ExtensionRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto_ExtensionRange.Merge(m, src) +} +func (m *DescriptorProto_ExtensionRange) XXX_Size() int { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Size(m) +} +func (m *DescriptorProto_ExtensionRange) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto_ExtensionRange.DiscardUnknown(m) +} + +var xxx_messageInfo_DescriptorProto_ExtensionRange proto.InternalMessageInfo + +func (m *DescriptorProto_ExtensionRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *DescriptorProto_ExtensionRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +func (m *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions { + if m != nil { + return m.Options + } + return nil +} + +// Range of reserved tag numbers. Reserved tag numbers may not be used by +// fields or extension ranges in the same message. Reserved ranges may +// not overlap. 
+type DescriptorProto_ReservedRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DescriptorProto_ReservedRange) Reset() { *m = DescriptorProto_ReservedRange{} } +func (m *DescriptorProto_ReservedRange) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto_ReservedRange) ProtoMessage() {} +func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{2, 1} +} +func (m *DescriptorProto_ReservedRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto_ReservedRange.Unmarshal(m, b) +} +func (m *DescriptorProto_ReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto_ReservedRange.Marshal(b, m, deterministic) +} +func (m *DescriptorProto_ReservedRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto_ReservedRange.Merge(m, src) +} +func (m *DescriptorProto_ReservedRange) XXX_Size() int { + return xxx_messageInfo_DescriptorProto_ReservedRange.Size(m) +} +func (m *DescriptorProto_ReservedRange) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto_ReservedRange.DiscardUnknown(m) +} + +var xxx_messageInfo_DescriptorProto_ReservedRange proto.InternalMessageInfo + +func (m *DescriptorProto_ReservedRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *DescriptorProto_ReservedRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +type ExtensionRangeOptions struct { + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExtensionRangeOptions) Reset() { *m = ExtensionRangeOptions{} } +func (m *ExtensionRangeOptions) String() string { return proto.CompactTextString(m) } +func (*ExtensionRangeOptions) ProtoMessage() {} +func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{3} +} + +var extRange_ExtensionRangeOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*ExtensionRangeOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ExtensionRangeOptions +} + +func (m *ExtensionRangeOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExtensionRangeOptions.Unmarshal(m, b) +} +func (m *ExtensionRangeOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExtensionRangeOptions.Marshal(b, m, deterministic) +} +func (m *ExtensionRangeOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtensionRangeOptions.Merge(m, src) +} +func (m *ExtensionRangeOptions) XXX_Size() int { + return xxx_messageInfo_ExtensionRangeOptions.Size(m) +} +func (m *ExtensionRangeOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ExtensionRangeOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ExtensionRangeOptions proto.InternalMessageInfo + +func (m *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +// Describes a field within a message. +type FieldDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,3,opt,name=number" json:"number,omitempty"` + Label *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"` + // If type_name is set, this need not be set. If both this and type_name + // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. + Type *FieldDescriptorProto_Type `protobuf:"varint,5,opt,name=type,enum=google.protobuf.FieldDescriptorProto_Type" json:"type,omitempty"` + // For message and enum types, this is the name of the type. If the name + // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping + // rules are used to find the type (i.e. first the nested types within this + // message are searched, then within the parent, on up to the root + // namespace). + TypeName *string `protobuf:"bytes,6,opt,name=type_name,json=typeName" json:"type_name,omitempty"` + // For extensions, this is the name of the type being extended. It is + // resolved in the same manner as type_name. + Extendee *string `protobuf:"bytes,2,opt,name=extendee" json:"extendee,omitempty"` + // For numeric types, contains the original text representation of the value. + // For booleans, "true" or "false". + // For strings, contains the default text contents (not escaped in any way). + // For bytes, contains the C escaped value. All bytes >= 128 are escaped. + // TODO(kenton): Base-64 encode? + DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"` + // If set, gives the index of a oneof in the containing type's oneof_decl + // list. 
This field is a member of that oneof. + OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index,json=oneofIndex" json:"oneof_index,omitempty"` + // JSON name of this field. The value is set by protocol compiler. If the + // user has set a "json_name" option on this field, that option's value + // will be used. Otherwise, it's deduced from the field's name by converting + // it to camelCase. + JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"` + Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} } +func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FieldDescriptorProto) ProtoMessage() {} +func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{4} +} +func (m *FieldDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FieldDescriptorProto.Unmarshal(m, b) +} +func (m *FieldDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FieldDescriptorProto.Marshal(b, m, deterministic) +} +func (m *FieldDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldDescriptorProto.Merge(m, src) +} +func (m *FieldDescriptorProto) XXX_Size() int { + return xxx_messageInfo_FieldDescriptorProto.Size(m) +} +func (m *FieldDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_FieldDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldDescriptorProto proto.InternalMessageInfo + +func (m *FieldDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FieldDescriptorProto) GetNumber() int32 { + if m != nil && m.Number != nil { + return *m.Number + } + return 0 +} + +func (m *FieldDescriptorProto) GetLabel() FieldDescriptorProto_Label { + if m != nil && m.Label != nil { + return *m.Label + } + return FieldDescriptorProto_LABEL_OPTIONAL +} + +func (m *FieldDescriptorProto) GetType() FieldDescriptorProto_Type { + if m != nil && m.Type != nil { + return *m.Type + } + return FieldDescriptorProto_TYPE_DOUBLE +} + +func (m *FieldDescriptorProto) GetTypeName() string { + if m != nil && m.TypeName != nil { + return *m.TypeName + } + return "" +} + +func (m *FieldDescriptorProto) GetExtendee() string { + if m != nil && m.Extendee != nil { + return *m.Extendee + } + return "" +} + +func (m *FieldDescriptorProto) GetDefaultValue() string { + if m != nil && m.DefaultValue != nil { + return *m.DefaultValue + } + return "" +} + +func (m *FieldDescriptorProto) GetOneofIndex() int32 { + if m != nil && m.OneofIndex != nil { + return *m.OneofIndex + } + return 0 +} + +func (m *FieldDescriptorProto) GetJsonName() string { + if m != nil && m.JsonName != nil { + return *m.JsonName + } + return "" +} + +func (m *FieldDescriptorProto) GetOptions() *FieldOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a oneof. 
+type OneofDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} } +func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*OneofDescriptorProto) ProtoMessage() {} +func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{5} +} +func (m *OneofDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OneofDescriptorProto.Unmarshal(m, b) +} +func (m *OneofDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OneofDescriptorProto.Marshal(b, m, deterministic) +} +func (m *OneofDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_OneofDescriptorProto.Merge(m, src) +} +func (m *OneofDescriptorProto) XXX_Size() int { + return xxx_messageInfo_OneofDescriptorProto.Size(m) +} +func (m *OneofDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_OneofDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_OneofDescriptorProto proto.InternalMessageInfo + +func (m *OneofDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *OneofDescriptorProto) GetOptions() *OneofOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes an enum type. +type EnumDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` + Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + // Range of reserved numeric values. Reserved numeric values may not be used + // by enum values in the same enum declaration. Reserved ranges may not + // overlap. + ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved enum value names, which may not be reused. A given name may only + // be reserved once. 
+ ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} } +func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumDescriptorProto) ProtoMessage() {} +func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{6} +} +func (m *EnumDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumDescriptorProto.Unmarshal(m, b) +} +func (m *EnumDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumDescriptorProto.Marshal(b, m, deterministic) +} +func (m *EnumDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumDescriptorProto.Merge(m, src) +} +func (m *EnumDescriptorProto) XXX_Size() int { + return xxx_messageInfo_EnumDescriptorProto.Size(m) +} +func (m *EnumDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_EnumDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumDescriptorProto proto.InternalMessageInfo + +func (m *EnumDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *EnumDescriptorProto) GetValue() []*EnumValueDescriptorProto { + if m != nil { + return m.Value + } + return nil +} + +func (m *EnumDescriptorProto) GetOptions() *EnumOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *EnumDescriptorProto) GetReservedRange() []*EnumDescriptorProto_EnumReservedRange { + if m != nil { + return m.ReservedRange + } + return nil +} + +func (m *EnumDescriptorProto) GetReservedName() []string { + if m != nil { + return m.ReservedName + } + return nil +} + +// Range of reserved numeric values. Reserved values may not be used by +// entries in the same enum. Reserved ranges may not overlap. +// +// Note that this is distinct from DescriptorProto.ReservedRange in that it +// is inclusive such that it can appropriately represent the entire int32 +// domain. 
+type EnumDescriptorProto_EnumReservedRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumDescriptorProto_EnumReservedRange) Reset() { *m = EnumDescriptorProto_EnumReservedRange{} } +func (m *EnumDescriptorProto_EnumReservedRange) String() string { return proto.CompactTextString(m) } +func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} +func (*EnumDescriptorProto_EnumReservedRange) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{6, 0} +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Unmarshal(m, b) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Marshal(b, m, deterministic) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Merge(m, src) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Size() int { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Size(m) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_DiscardUnknown() { + xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumDescriptorProto_EnumReservedRange proto.InternalMessageInfo + +func (m *EnumDescriptorProto_EnumReservedRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +// Describes a value within an enum. 
+type EnumValueDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` + Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumValueDescriptorProto) Reset() { *m = EnumValueDescriptorProto{} } +func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumValueDescriptorProto) ProtoMessage() {} +func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{7} +} +func (m *EnumValueDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumValueDescriptorProto.Unmarshal(m, b) +} +func (m *EnumValueDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumValueDescriptorProto.Marshal(b, m, deterministic) +} +func (m *EnumValueDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumValueDescriptorProto.Merge(m, src) +} +func (m *EnumValueDescriptorProto) XXX_Size() int { + return xxx_messageInfo_EnumValueDescriptorProto.Size(m) +} +func (m *EnumValueDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_EnumValueDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumValueDescriptorProto proto.InternalMessageInfo + +func (m *EnumValueDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *EnumValueDescriptorProto) GetNumber() int32 { + if m != nil && m.Number != nil { + return *m.Number + } + return 0 +} + +func (m *EnumValueDescriptorProto) GetOptions() *EnumValueOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a service. 
+type ServiceDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` + Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} } +func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*ServiceDescriptorProto) ProtoMessage() {} +func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{8} +} +func (m *ServiceDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServiceDescriptorProto.Unmarshal(m, b) +} +func (m *ServiceDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServiceDescriptorProto.Marshal(b, m, deterministic) +} +func (m *ServiceDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceDescriptorProto.Merge(m, src) +} +func (m *ServiceDescriptorProto) XXX_Size() int { + return xxx_messageInfo_ServiceDescriptorProto.Size(m) +} +func (m *ServiceDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceDescriptorProto proto.InternalMessageInfo + +func (m *ServiceDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *ServiceDescriptorProto) GetMethod() []*MethodDescriptorProto { + if m != nil { + return m.Method + } + return nil +} + +func (m *ServiceDescriptorProto) GetOptions() *ServiceOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a method of a service. +type MethodDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Input and output type names. These are resolved in the same way as + // FieldDescriptorProto.type_name, but must refer to a message type. 
+ InputType *string `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"` + OutputType *string `protobuf:"bytes,3,opt,name=output_type,json=outputType" json:"output_type,omitempty"` + Options *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"` + // Identifies if client streams multiple client messages + ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"` + // Identifies if server streams multiple server messages + ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} } +func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*MethodDescriptorProto) ProtoMessage() {} +func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{9} +} +func (m *MethodDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MethodDescriptorProto.Unmarshal(m, b) +} +func (m *MethodDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MethodDescriptorProto.Marshal(b, m, deterministic) +} +func (m *MethodDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_MethodDescriptorProto.Merge(m, src) +} +func (m *MethodDescriptorProto) XXX_Size() int { + return xxx_messageInfo_MethodDescriptorProto.Size(m) +} +func (m *MethodDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_MethodDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_MethodDescriptorProto proto.InternalMessageInfo + +const Default_MethodDescriptorProto_ClientStreaming bool = false +const Default_MethodDescriptorProto_ServerStreaming bool = false + +func (m *MethodDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MethodDescriptorProto) GetInputType() string { + if m != nil && m.InputType != nil { + return *m.InputType + } + return "" +} + +func (m *MethodDescriptorProto) GetOutputType() string { + if m != nil && m.OutputType != nil { + return *m.OutputType + } + return "" +} + +func (m *MethodDescriptorProto) GetOptions() *MethodOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *MethodDescriptorProto) GetClientStreaming() bool { + if m != nil && m.ClientStreaming != nil { + return *m.ClientStreaming + } + return Default_MethodDescriptorProto_ClientStreaming +} + +func (m *MethodDescriptorProto) GetServerStreaming() bool { + if m != nil && m.ServerStreaming != nil { + return *m.ServerStreaming + } + return Default_MethodDescriptorProto_ServerStreaming +} + +type FileOptions struct { + // Sets the Java package where classes generated from this .proto will be + // placed. By default, the proto package is used, but this is often + // inappropriate because proto packages do not normally start with backwards + // domain names. + JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"` + // If set, all the classes from the .proto file are wrapped in a single + // outer class with the given name. 
This applies to both Proto1 + // (equivalent to the old "--one_java_file" option) and Proto2 (where + // a .proto always translates to a single class, but you may want to + // explicitly choose the class name). + JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"` + // If set true, then the Java code generator will generate a separate .java + // file for each top-level message, enum, and service defined in the .proto + // file. Thus, these types will *not* be nested inside the outer class + // named by java_outer_classname. However, the outer class will still be + // generated to contain the file's getDescriptor() method as well as any + // top-level extensions defined in the file. + JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"` + // This option does nothing. + JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` // Deprecated: Do not use. + // If set true, then the Java2 code generator will generate code that + // throws an exception whenever an attempt is made to assign a non-UTF-8 + // byte sequence to a string field. + // Message reflection will do the same. + // However, an extension field still accepts non-UTF-8 byte sequences. + // This option has no effect on when used with the lite runtime. + JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"` + OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"` + // Sets the Go package where structs generated from this .proto will be + // placed. If omitted, the Go package will be derived from the following: + // - The basename of the package import path, if provided. + // - Otherwise, the package statement in the .proto file, if present. + // - Otherwise, the basename of the .proto file, without extension. + GoPackage *string `protobuf:"bytes,11,opt,name=go_package,json=goPackage" json:"go_package,omitempty"` + // Should generic services be generated in each language? "Generic" services + // are not specific to any particular RPC system. They are generated by the + // main code generators in each language (without additional plugins). + // Generic services were the only kind of service generation supported by + // early versions of google.protobuf. + // + // Generic services are now considered deprecated in favor of using plugins + // that generate code specific to your particular RPC system. Therefore, + // these default to false. Old code which depends on generic services should + // explicitly set them to true. + CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"` + JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"` + PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"` + PhpGenericServices *bool `protobuf:"varint,42,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"` + // Is this file deprecated? 
+ // Depending on the target platform, this can emit Deprecated annotations + // for everything in the file, or it will be completely ignored; in the very + // least, this is a formalization for deprecating files. + Deprecated *bool `protobuf:"varint,23,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Enables the use of arenas for the proto messages in this file. This applies + // only to generated classes for C++. + CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,json=ccEnableArenas,def=0" json:"cc_enable_arenas,omitempty"` + // Sets the objective c class prefix which is prepended to all objective c + // generated classes from this .proto. There is no default. + ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"` + // Namespace for generated classes; defaults to the package. + CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"` + // By default Swift generators will take the proto package and CamelCase it + // replacing '.' with underscore and use that to prefix the types/symbols + // defined. When this options is provided, they will use this value instead + // to prefix the types/symbols defined. + SwiftPrefix *string `protobuf:"bytes,39,opt,name=swift_prefix,json=swiftPrefix" json:"swift_prefix,omitempty"` + // Sets the php class prefix which is prepended to all php generated classes + // from this .proto. Default is empty. + PhpClassPrefix *string `protobuf:"bytes,40,opt,name=php_class_prefix,json=phpClassPrefix" json:"php_class_prefix,omitempty"` + // Use this option to change the namespace of php generated classes. Default + // is empty. When this option is empty, the package name will be used for + // determining the namespace. + PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"` + // Use this option to change the namespace of php generated metadata classes. + // Default is empty. When this option is empty, the proto file name will be + // used for determining the namespace. + PhpMetadataNamespace *string `protobuf:"bytes,44,opt,name=php_metadata_namespace,json=phpMetadataNamespace" json:"php_metadata_namespace,omitempty"` + // Use this option to change the package of ruby generated classes. Default + // is empty. When this option is not set, the package name will be used for + // determining the ruby package. + RubyPackage *string `protobuf:"bytes,45,opt,name=ruby_package,json=rubyPackage" json:"ruby_package,omitempty"` + // The parser stores options it doesn't recognize here. + // See the documentation for the "Options" section above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileOptions) Reset() { *m = FileOptions{} } +func (m *FileOptions) String() string { return proto.CompactTextString(m) } +func (*FileOptions) ProtoMessage() {} +func (*FileOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{10} +} + +var extRange_FileOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_FileOptions +} + +func (m *FileOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileOptions.Unmarshal(m, b) +} +func (m *FileOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileOptions.Marshal(b, m, deterministic) +} +func (m *FileOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileOptions.Merge(m, src) +} +func (m *FileOptions) XXX_Size() int { + return xxx_messageInfo_FileOptions.Size(m) +} +func (m *FileOptions) XXX_DiscardUnknown() { + xxx_messageInfo_FileOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_FileOptions proto.InternalMessageInfo + +const Default_FileOptions_JavaMultipleFiles bool = false +const Default_FileOptions_JavaStringCheckUtf8 bool = false +const Default_FileOptions_OptimizeFor FileOptions_OptimizeMode = FileOptions_SPEED +const Default_FileOptions_CcGenericServices bool = false +const Default_FileOptions_JavaGenericServices bool = false +const Default_FileOptions_PyGenericServices bool = false +const Default_FileOptions_PhpGenericServices bool = false +const Default_FileOptions_Deprecated bool = false +const Default_FileOptions_CcEnableArenas bool = false + +func (m *FileOptions) GetJavaPackage() string { + if m != nil && m.JavaPackage != nil { + return *m.JavaPackage + } + return "" +} + +func (m *FileOptions) GetJavaOuterClassname() string { + if m != nil && m.JavaOuterClassname != nil { + return *m.JavaOuterClassname + } + return "" +} + +func (m *FileOptions) GetJavaMultipleFiles() bool { + if m != nil && m.JavaMultipleFiles != nil { + return *m.JavaMultipleFiles + } + return Default_FileOptions_JavaMultipleFiles +} + +// Deprecated: Do not use. 
+func (m *FileOptions) GetJavaGenerateEqualsAndHash() bool { + if m != nil && m.JavaGenerateEqualsAndHash != nil { + return *m.JavaGenerateEqualsAndHash + } + return false +} + +func (m *FileOptions) GetJavaStringCheckUtf8() bool { + if m != nil && m.JavaStringCheckUtf8 != nil { + return *m.JavaStringCheckUtf8 + } + return Default_FileOptions_JavaStringCheckUtf8 +} + +func (m *FileOptions) GetOptimizeFor() FileOptions_OptimizeMode { + if m != nil && m.OptimizeFor != nil { + return *m.OptimizeFor + } + return Default_FileOptions_OptimizeFor +} + +func (m *FileOptions) GetGoPackage() string { + if m != nil && m.GoPackage != nil { + return *m.GoPackage + } + return "" +} + +func (m *FileOptions) GetCcGenericServices() bool { + if m != nil && m.CcGenericServices != nil { + return *m.CcGenericServices + } + return Default_FileOptions_CcGenericServices +} + +func (m *FileOptions) GetJavaGenericServices() bool { + if m != nil && m.JavaGenericServices != nil { + return *m.JavaGenericServices + } + return Default_FileOptions_JavaGenericServices +} + +func (m *FileOptions) GetPyGenericServices() bool { + if m != nil && m.PyGenericServices != nil { + return *m.PyGenericServices + } + return Default_FileOptions_PyGenericServices +} + +func (m *FileOptions) GetPhpGenericServices() bool { + if m != nil && m.PhpGenericServices != nil { + return *m.PhpGenericServices + } + return Default_FileOptions_PhpGenericServices +} + +func (m *FileOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_FileOptions_Deprecated +} + +func (m *FileOptions) GetCcEnableArenas() bool { + if m != nil && m.CcEnableArenas != nil { + return *m.CcEnableArenas + } + return Default_FileOptions_CcEnableArenas +} + +func (m *FileOptions) GetObjcClassPrefix() string { + if m != nil && m.ObjcClassPrefix != nil { + return *m.ObjcClassPrefix + } + return "" +} + +func (m *FileOptions) GetCsharpNamespace() string { + if m != nil && m.CsharpNamespace != nil { + return *m.CsharpNamespace + } + return "" +} + +func (m *FileOptions) GetSwiftPrefix() string { + if m != nil && m.SwiftPrefix != nil { + return *m.SwiftPrefix + } + return "" +} + +func (m *FileOptions) GetPhpClassPrefix() string { + if m != nil && m.PhpClassPrefix != nil { + return *m.PhpClassPrefix + } + return "" +} + +func (m *FileOptions) GetPhpNamespace() string { + if m != nil && m.PhpNamespace != nil { + return *m.PhpNamespace + } + return "" +} + +func (m *FileOptions) GetPhpMetadataNamespace() string { + if m != nil && m.PhpMetadataNamespace != nil { + return *m.PhpMetadataNamespace + } + return "" +} + +func (m *FileOptions) GetRubyPackage() string { + if m != nil && m.RubyPackage != nil { + return *m.RubyPackage + } + return "" +} + +func (m *FileOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type MessageOptions struct { + // Set true to use the old proto1 MessageSet wire format for extensions. + // This is provided for backwards-compatibility with the MessageSet wire + // format. You should not use this for any other reason: It's less + // efficient, has fewer features, and is more complicated. + // + // The message must be defined exactly as follows: + // message Foo { + // option message_set_wire_format = true; + // extensions 4 to max; + // } + // Note that the message cannot have any defined fields; MessageSets only + // have extensions. + // + // All extensions of your type must be singular messages; e.g. 
they cannot + // be int32s, enums, or repeated messages. + // + // Because this is an option, the above two restrictions are not enforced by + // the protocol compiler. + MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,json=messageSetWireFormat,def=0" json:"message_set_wire_format,omitempty"` + // Disables the generation of the standard "descriptor()" accessor, which can + // conflict with a field of the same name. This is meant to make migration + // from proto1 easier; new code should avoid fields named "descriptor". + NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,json=noStandardDescriptorAccessor,def=0" json:"no_standard_descriptor_accessor,omitempty"` + // Is this message deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the message, or it will be completely ignored; in the very least, + // this is a formalization for deprecating messages. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Whether the message is an automatically generated map entry type for the + // maps field. + // + // For maps fields: + // map<KeyType, ValueType> map_field = 1; + // The parsed descriptor looks like: + // message MapFieldEntry { + // option map_entry = true; + // optional KeyType key = 1; + // optional ValueType value = 2; + // } + // repeated MapFieldEntry map_field = 1; + // + // Implementations may choose not to generate the map_entry=true message, but + // use a native map in the target language to hold the keys and values. + // The reflection APIs in such implementations still need to work as + // if the field is a repeated message field. + // + // NOTE: Do not set the option in .proto files. Always use the maps syntax + // instead. The option should only be implicitly set by the proto compiler + // parser. + MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"` + // The parser stores options it doesn't recognize here. See above.
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MessageOptions) Reset() { *m = MessageOptions{} } +func (m *MessageOptions) String() string { return proto.CompactTextString(m) } +func (*MessageOptions) ProtoMessage() {} +func (*MessageOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{11} +} + +var extRange_MessageOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MessageOptions +} + +func (m *MessageOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MessageOptions.Unmarshal(m, b) +} +func (m *MessageOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MessageOptions.Marshal(b, m, deterministic) +} +func (m *MessageOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_MessageOptions.Merge(m, src) +} +func (m *MessageOptions) XXX_Size() int { + return xxx_messageInfo_MessageOptions.Size(m) +} +func (m *MessageOptions) XXX_DiscardUnknown() { + xxx_messageInfo_MessageOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_MessageOptions proto.InternalMessageInfo + +const Default_MessageOptions_MessageSetWireFormat bool = false +const Default_MessageOptions_NoStandardDescriptorAccessor bool = false +const Default_MessageOptions_Deprecated bool = false + +func (m *MessageOptions) GetMessageSetWireFormat() bool { + if m != nil && m.MessageSetWireFormat != nil { + return *m.MessageSetWireFormat + } + return Default_MessageOptions_MessageSetWireFormat +} + +func (m *MessageOptions) GetNoStandardDescriptorAccessor() bool { + if m != nil && m.NoStandardDescriptorAccessor != nil { + return *m.NoStandardDescriptorAccessor + } + return Default_MessageOptions_NoStandardDescriptorAccessor +} + +func (m *MessageOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_MessageOptions_Deprecated +} + +func (m *MessageOptions) GetMapEntry() bool { + if m != nil && m.MapEntry != nil { + return *m.MapEntry + } + return false +} + +func (m *MessageOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type FieldOptions struct { + // The ctype option instructs the C++ code generator to use a different + // representation of the field than it normally would. See the specific + // options below. This option is not yet implemented in the open source + // release -- sorry, we'll try to include it in a future version! + Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"` + // The packed option can be enabled for repeated primitive fields to enable + // a more efficient representation on the wire. Rather than repeatedly + // writing the tag and type for each element, the entire array is encoded as + // a single length-delimited blob. In proto3, only explicit setting it to + // false will avoid using packed encoding. + Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"` + // The jstype option determines the JavaScript type used for values of the + // field. 
The option is permitted only for 64 bit integral and fixed types + // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING + // is represented as JavaScript string, which avoids loss of precision that + // can happen when a large value is converted to a floating point JavaScript. + // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to + // use the JavaScript "number" type. The behavior of the default option + // JS_NORMAL is implementation dependent. + // + // This option is an enum to permit additional types to be added, e.g. + // goog.math.Integer. + Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"` + // Should this field be parsed lazily? Lazy applies only to message-type + // fields. It means that when the outer message is initially parsed, the + // inner message's contents will not be parsed but instead stored in encoded + // form. The inner message will actually be parsed when it is first accessed. + // + // This is only a hint. Implementations are free to choose whether to use + // eager or lazy parsing regardless of the value of this option. However, + // setting this option true suggests that the protocol author believes that + // using lazy parsing on this field is worth the additional bookkeeping + // overhead typically needed to implement it. + // + // This option does not affect the public interface of any generated code; + // all method signatures remain the same. Furthermore, thread-safety of the + // interface is not affected by this option; const methods remain safe to + // call from multiple threads concurrently, while non-const methods continue + // to require exclusive access. + // + // + // Note that implementations may choose not to check required fields within + // a lazy sub-message. That is, calling IsInitialized() on the outer message + // may return true even if the inner message has missing required fields. + // This is necessary because otherwise the inner message would have to be + // parsed in order to perform the check, defeating the purpose of lazy + // parsing. An implementation which chooses not to check required fields + // must be consistent about it. That is, for any particular sub-message, the + // implementation must either *always* check its required fields, or *never* + // check its required fields, regardless of whether or not the message has + // been parsed. + Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"` + // Is this field deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for accessors, or it will be completely ignored; in the very least, this + // is a formalization for deprecating fields. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // For Google-internal migration only. Do not use. + Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FieldOptions) Reset() { *m = FieldOptions{} } +func (m *FieldOptions) String() string { return proto.CompactTextString(m) } +func (*FieldOptions) ProtoMessage() {} +func (*FieldOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{12} +} + +var extRange_FieldOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_FieldOptions +} + +func (m *FieldOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FieldOptions.Unmarshal(m, b) +} +func (m *FieldOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FieldOptions.Marshal(b, m, deterministic) +} +func (m *FieldOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldOptions.Merge(m, src) +} +func (m *FieldOptions) XXX_Size() int { + return xxx_messageInfo_FieldOptions.Size(m) +} +func (m *FieldOptions) XXX_DiscardUnknown() { + xxx_messageInfo_FieldOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldOptions proto.InternalMessageInfo + +const Default_FieldOptions_Ctype FieldOptions_CType = FieldOptions_STRING +const Default_FieldOptions_Jstype FieldOptions_JSType = FieldOptions_JS_NORMAL +const Default_FieldOptions_Lazy bool = false +const Default_FieldOptions_Deprecated bool = false +const Default_FieldOptions_Weak bool = false + +func (m *FieldOptions) GetCtype() FieldOptions_CType { + if m != nil && m.Ctype != nil { + return *m.Ctype + } + return Default_FieldOptions_Ctype +} + +func (m *FieldOptions) GetPacked() bool { + if m != nil && m.Packed != nil { + return *m.Packed + } + return false +} + +func (m *FieldOptions) GetJstype() FieldOptions_JSType { + if m != nil && m.Jstype != nil { + return *m.Jstype + } + return Default_FieldOptions_Jstype +} + +func (m *FieldOptions) GetLazy() bool { + if m != nil && m.Lazy != nil { + return *m.Lazy + } + return Default_FieldOptions_Lazy +} + +func (m *FieldOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_FieldOptions_Deprecated +} + +func (m *FieldOptions) GetWeak() bool { + if m != nil && m.Weak != nil { + return *m.Weak + } + return Default_FieldOptions_Weak +} + +func (m *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type OneofOptions struct { + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OneofOptions) Reset() { *m = OneofOptions{} } +func (m *OneofOptions) String() string { return proto.CompactTextString(m) } +func (*OneofOptions) ProtoMessage() {} +func (*OneofOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{13} +} + +var extRange_OneofOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*OneofOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_OneofOptions +} + +func (m *OneofOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OneofOptions.Unmarshal(m, b) +} +func (m *OneofOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OneofOptions.Marshal(b, m, deterministic) +} +func (m *OneofOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_OneofOptions.Merge(m, src) +} +func (m *OneofOptions) XXX_Size() int { + return xxx_messageInfo_OneofOptions.Size(m) +} +func (m *OneofOptions) XXX_DiscardUnknown() { + xxx_messageInfo_OneofOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_OneofOptions proto.InternalMessageInfo + +func (m *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type EnumOptions struct { + // Set this option to true to allow mapping different tag names to the same + // value. + AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"` + // Is this enum deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum, or it will be completely ignored; in the very least, this + // is a formalization for deprecating enums. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumOptions) Reset() { *m = EnumOptions{} } +func (m *EnumOptions) String() string { return proto.CompactTextString(m) } +func (*EnumOptions) ProtoMessage() {} +func (*EnumOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{14} +} + +var extRange_EnumOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_EnumOptions +} + +func (m *EnumOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumOptions.Unmarshal(m, b) +} +func (m *EnumOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumOptions.Marshal(b, m, deterministic) +} +func (m *EnumOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumOptions.Merge(m, src) +} +func (m *EnumOptions) XXX_Size() int { + return xxx_messageInfo_EnumOptions.Size(m) +} +func (m *EnumOptions) XXX_DiscardUnknown() { + xxx_messageInfo_EnumOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumOptions proto.InternalMessageInfo + +const Default_EnumOptions_Deprecated bool = false + +func (m *EnumOptions) GetAllowAlias() bool { + if m != nil && m.AllowAlias != nil { + return *m.AllowAlias + } + return false +} + +func (m *EnumOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_EnumOptions_Deprecated +} + +func (m *EnumOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type EnumValueOptions struct { + // Is this enum value deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum value, or it will be completely ignored; in the very least, + // this is a formalization for deprecating enum values. + Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} } +func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) } +func (*EnumValueOptions) ProtoMessage() {} +func (*EnumValueOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{15} +} + +var extRange_EnumValueOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_EnumValueOptions +} + +func (m *EnumValueOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumValueOptions.Unmarshal(m, b) +} +func (m *EnumValueOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumValueOptions.Marshal(b, m, deterministic) +} +func (m *EnumValueOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumValueOptions.Merge(m, src) +} +func (m *EnumValueOptions) XXX_Size() int { + return xxx_messageInfo_EnumValueOptions.Size(m) +} +func (m *EnumValueOptions) XXX_DiscardUnknown() { + xxx_messageInfo_EnumValueOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumValueOptions proto.InternalMessageInfo + +const Default_EnumValueOptions_Deprecated bool = false + +func (m *EnumValueOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_EnumValueOptions_Deprecated +} + +func (m *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type ServiceOptions struct { + // Is this service deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the service, or it will be completely ignored; in the very least, + // this is a formalization for deprecating services. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServiceOptions) Reset() { *m = ServiceOptions{} } +func (m *ServiceOptions) String() string { return proto.CompactTextString(m) } +func (*ServiceOptions) ProtoMessage() {} +func (*ServiceOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{16} +} + +var extRange_ServiceOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ServiceOptions +} + +func (m *ServiceOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServiceOptions.Unmarshal(m, b) +} +func (m *ServiceOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServiceOptions.Marshal(b, m, deterministic) +} +func (m *ServiceOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceOptions.Merge(m, src) +} +func (m *ServiceOptions) XXX_Size() int { + return xxx_messageInfo_ServiceOptions.Size(m) +} +func (m *ServiceOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceOptions proto.InternalMessageInfo + +const Default_ServiceOptions_Deprecated bool = false + +func (m *ServiceOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_ServiceOptions_Deprecated +} + +func (m *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type MethodOptions struct { + // Is this method deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the method, or it will be completely ignored; in the very least, + // this is a formalization for deprecating methods. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MethodOptions) Reset() { *m = MethodOptions{} } +func (m *MethodOptions) String() string { return proto.CompactTextString(m) } +func (*MethodOptions) ProtoMessage() {} +func (*MethodOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{17} +} + +var extRange_MethodOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MethodOptions +} + +func (m *MethodOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MethodOptions.Unmarshal(m, b) +} +func (m *MethodOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MethodOptions.Marshal(b, m, deterministic) +} +func (m *MethodOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_MethodOptions.Merge(m, src) +} +func (m *MethodOptions) XXX_Size() int { + return xxx_messageInfo_MethodOptions.Size(m) +} +func (m *MethodOptions) XXX_DiscardUnknown() { + xxx_messageInfo_MethodOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_MethodOptions proto.InternalMessageInfo + +const Default_MethodOptions_Deprecated bool = false +const Default_MethodOptions_IdempotencyLevel MethodOptions_IdempotencyLevel = MethodOptions_IDEMPOTENCY_UNKNOWN + +func (m *MethodOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_MethodOptions_Deprecated +} + +func (m *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel { + if m != nil && m.IdempotencyLevel != nil { + return *m.IdempotencyLevel + } + return Default_MethodOptions_IdempotencyLevel +} + +func (m *MethodOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +// A message representing a option the parser does not recognize. This only +// appears in options protos created by the compiler::Parser class. +// DescriptorPool resolves these when building Descriptor objects. Therefore, +// options protos in descriptor objects (e.g. returned by Descriptor::options(), +// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions +// in them. +type UninterpretedOption struct { + Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"` + // The value of the uninterpreted option, in whatever type the tokenizer + // identified it as during parsing. Exactly one of these should be set. 
+ IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"` + PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"` + NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"` + DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"` + StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"` + AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} } +func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) } +func (*UninterpretedOption) ProtoMessage() {} +func (*UninterpretedOption) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{18} +} +func (m *UninterpretedOption) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UninterpretedOption.Unmarshal(m, b) +} +func (m *UninterpretedOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UninterpretedOption.Marshal(b, m, deterministic) +} +func (m *UninterpretedOption) XXX_Merge(src proto.Message) { + xxx_messageInfo_UninterpretedOption.Merge(m, src) +} +func (m *UninterpretedOption) XXX_Size() int { + return xxx_messageInfo_UninterpretedOption.Size(m) +} +func (m *UninterpretedOption) XXX_DiscardUnknown() { + xxx_messageInfo_UninterpretedOption.DiscardUnknown(m) +} + +var xxx_messageInfo_UninterpretedOption proto.InternalMessageInfo + +func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart { + if m != nil { + return m.Name + } + return nil +} + +func (m *UninterpretedOption) GetIdentifierValue() string { + if m != nil && m.IdentifierValue != nil { + return *m.IdentifierValue + } + return "" +} + +func (m *UninterpretedOption) GetPositiveIntValue() uint64 { + if m != nil && m.PositiveIntValue != nil { + return *m.PositiveIntValue + } + return 0 +} + +func (m *UninterpretedOption) GetNegativeIntValue() int64 { + if m != nil && m.NegativeIntValue != nil { + return *m.NegativeIntValue + } + return 0 +} + +func (m *UninterpretedOption) GetDoubleValue() float64 { + if m != nil && m.DoubleValue != nil { + return *m.DoubleValue + } + return 0 +} + +func (m *UninterpretedOption) GetStringValue() []byte { + if m != nil { + return m.StringValue + } + return nil +} + +func (m *UninterpretedOption) GetAggregateValue() string { + if m != nil && m.AggregateValue != nil { + return *m.AggregateValue + } + return "" +} + +// The name of the uninterpreted option. Each string represents a segment in +// a dot-separated name. is_extension is true iff a segment represents an +// extension (denoted with parentheses in options specs in .proto files). +// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents +// "foo.(bar.baz).qux". 
+type UninterpretedOption_NamePart struct { + NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"` + IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UninterpretedOption_NamePart) Reset() { *m = UninterpretedOption_NamePart{} } +func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) } +func (*UninterpretedOption_NamePart) ProtoMessage() {} +func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{18, 0} +} +func (m *UninterpretedOption_NamePart) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UninterpretedOption_NamePart.Unmarshal(m, b) +} +func (m *UninterpretedOption_NamePart) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UninterpretedOption_NamePart.Marshal(b, m, deterministic) +} +func (m *UninterpretedOption_NamePart) XXX_Merge(src proto.Message) { + xxx_messageInfo_UninterpretedOption_NamePart.Merge(m, src) +} +func (m *UninterpretedOption_NamePart) XXX_Size() int { + return xxx_messageInfo_UninterpretedOption_NamePart.Size(m) +} +func (m *UninterpretedOption_NamePart) XXX_DiscardUnknown() { + xxx_messageInfo_UninterpretedOption_NamePart.DiscardUnknown(m) +} + +var xxx_messageInfo_UninterpretedOption_NamePart proto.InternalMessageInfo + +func (m *UninterpretedOption_NamePart) GetNamePart() string { + if m != nil && m.NamePart != nil { + return *m.NamePart + } + return "" +} + +func (m *UninterpretedOption_NamePart) GetIsExtension() bool { + if m != nil && m.IsExtension != nil { + return *m.IsExtension + } + return false +} + +// Encapsulates information about the original source file from which a +// FileDescriptorProto was generated. +type SourceCodeInfo struct { + // A Location identifies a piece of source code in a .proto file which + // corresponds to a particular definition. This information is intended + // to be useful to IDEs, code indexers, documentation generators, and similar + // tools. + // + // For example, say we have a file like: + // message Foo { + // optional string foo = 1; + // } + // Let's look at just the field definition: + // optional string foo = 1; + // ^ ^^ ^^ ^ ^^^ + // a bc de f ghi + // We have the following locations: + // span path represents + // [a,i) [ 4, 0, 2, 0 ] The whole field definition. + // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). + // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). + // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). + // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). + // + // Notes: + // - A location may refer to a repeated field itself (i.e. not to any + // particular index within it). This is used whenever a set of elements are + // logically enclosed in a single code segment. For example, an entire + // extend block (possibly containing multiple extension definitions) will + // have an outer location whose path refers to the "extensions" repeated + // field without an index. + // - Multiple locations may have the same path. This happens when a single + // logical declaration is spread out across multiple places. The most + // obvious example is the "extend" block again -- there may be multiple + // extend blocks in the same scope, each of which will have the same path. + // - A location's span is not always a subset of its parent's span. 
For + // example, the "extendee" of an extension declaration appears at the + // beginning of the "extend" block and is shared by all extensions within + // the block. + // - Just because a location's span is a subset of some other location's span + // does not mean that it is a descendant. For example, a "group" defines + // both a type and a field in a single declaration. Thus, the locations + // corresponding to the type and field and their components will overlap. + // - Code which tries to interpret locations should probably be designed to + // ignore those that it doesn't understand, as more types of locations could + // be recorded in the future. + Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} } +func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) } +func (*SourceCodeInfo) ProtoMessage() {} +func (*SourceCodeInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{19} +} +func (m *SourceCodeInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SourceCodeInfo.Unmarshal(m, b) +} +func (m *SourceCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SourceCodeInfo.Marshal(b, m, deterministic) +} +func (m *SourceCodeInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_SourceCodeInfo.Merge(m, src) +} +func (m *SourceCodeInfo) XXX_Size() int { + return xxx_messageInfo_SourceCodeInfo.Size(m) +} +func (m *SourceCodeInfo) XXX_DiscardUnknown() { + xxx_messageInfo_SourceCodeInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_SourceCodeInfo proto.InternalMessageInfo + +func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { + if m != nil { + return m.Location + } + return nil +} + +type SourceCodeInfo_Location struct { + // Identifies which part of the FileDescriptorProto was defined at this + // location. + // + // Each element is a field number or an index. They form a path from + // the root FileDescriptorProto to the place where the definition. For + // example, this path: + // [ 4, 3, 2, 7, 1 ] + // refers to: + // file.message_type(3) // 4, 3 + // .field(7) // 2, 7 + // .name() // 1 + // This is because FileDescriptorProto.message_type has field number 4: + // repeated DescriptorProto message_type = 4; + // and DescriptorProto.field has field number 2: + // repeated FieldDescriptorProto field = 2; + // and FieldDescriptorProto.name has field number 1: + // optional string name = 1; + // + // Thus, the above path gives the location of a field name. If we removed + // the last element: + // [ 4, 3, 2, 7 ] + // this path refers to the whole field declaration (from the beginning + // of the label to the terminating semicolon). + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Always has exactly three or four elements: start line, start column, + // end line (optional, otherwise assumed same as start line), end column. + // These are packed into a single field for efficiency. Note that line + // and column numbers are zero-based -- typically you will want to add + // 1 to each before displaying to a user. 
+ Span []int32 `protobuf:"varint,2,rep,packed,name=span" json:"span,omitempty"` + // If this SourceCodeInfo represents a complete declaration, these are any + // comments appearing before and after the declaration which appear to be + // attached to the declaration. + // + // A series of line comments appearing on consecutive lines, with no other + // tokens appearing on those lines, will be treated as a single comment. + // + // leading_detached_comments will keep paragraphs of comments that appear + // before (but not connected to) the current element. Each paragraph, + // separated by empty lines, will be one comment element in the repeated + // field. + // + // Only the comment content is provided; comment markers (e.g. //) are + // stripped out. For block comments, leading whitespace and an asterisk + // will be stripped from the beginning of each line other than the first. + // Newlines are included in the output. + // + // Examples: + // + // optional int32 foo = 1; // Comment attached to foo. + // // Comment attached to bar. + // optional int32 bar = 2; + // + // optional string baz = 3; + // // Comment attached to baz. + // // Another line attached to baz. + // + // // Comment attached to qux. + // // + // // Another line attached to qux. + // optional double qux = 4; + // + // // Detached comment for corge. This is not leading or trailing comments + // // to qux or corge because there are blank lines separating it from + // // both. + // + // // Detached comment for corge paragraph 2. + // + // optional string corge = 5; + // /* Block comment attached + // * to corge. Leading asterisks + // * will be removed. */ + // /* Block comment attached to + // * grault. */ + // optional int32 grault = 6; + // + // // ignored detached comments. + LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"` + TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"` + LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location{} } +func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) } +func (*SourceCodeInfo_Location) ProtoMessage() {} +func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{19, 0} +} +func (m *SourceCodeInfo_Location) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SourceCodeInfo_Location.Unmarshal(m, b) +} +func (m *SourceCodeInfo_Location) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SourceCodeInfo_Location.Marshal(b, m, deterministic) +} +func (m *SourceCodeInfo_Location) XXX_Merge(src proto.Message) { + xxx_messageInfo_SourceCodeInfo_Location.Merge(m, src) +} +func (m *SourceCodeInfo_Location) XXX_Size() int { + return xxx_messageInfo_SourceCodeInfo_Location.Size(m) +} +func (m *SourceCodeInfo_Location) XXX_DiscardUnknown() { + xxx_messageInfo_SourceCodeInfo_Location.DiscardUnknown(m) +} + +var xxx_messageInfo_SourceCodeInfo_Location proto.InternalMessageInfo + +func (m *SourceCodeInfo_Location) GetPath() []int32 { + if m != nil { + return m.Path + } + return nil +} + +func (m *SourceCodeInfo_Location) GetSpan() 
[]int32 { + if m != nil { + return m.Span + } + return nil +} + +func (m *SourceCodeInfo_Location) GetLeadingComments() string { + if m != nil && m.LeadingComments != nil { + return *m.LeadingComments + } + return "" +} + +func (m *SourceCodeInfo_Location) GetTrailingComments() string { + if m != nil && m.TrailingComments != nil { + return *m.TrailingComments + } + return "" +} + +func (m *SourceCodeInfo_Location) GetLeadingDetachedComments() []string { + if m != nil { + return m.LeadingDetachedComments + } + return nil +} + +// Describes the relationship between generated code and its original source +// file. A GeneratedCodeInfo message is associated with only one generated +// source file, but may contain references to different source .proto files. +type GeneratedCodeInfo struct { + // An Annotation connects some span of text in generated code to an element + // of its generating .proto file. + Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GeneratedCodeInfo) Reset() { *m = GeneratedCodeInfo{} } +func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo) ProtoMessage() {} +func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{20} +} +func (m *GeneratedCodeInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GeneratedCodeInfo.Unmarshal(m, b) +} +func (m *GeneratedCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GeneratedCodeInfo.Marshal(b, m, deterministic) +} +func (m *GeneratedCodeInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_GeneratedCodeInfo.Merge(m, src) +} +func (m *GeneratedCodeInfo) XXX_Size() int { + return xxx_messageInfo_GeneratedCodeInfo.Size(m) +} +func (m *GeneratedCodeInfo) XXX_DiscardUnknown() { + xxx_messageInfo_GeneratedCodeInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_GeneratedCodeInfo proto.InternalMessageInfo + +func (m *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { + if m != nil { + return m.Annotation + } + return nil +} + +type GeneratedCodeInfo_Annotation struct { + // Identifies the element in the original source .proto file. This field + // is formatted the same as SourceCodeInfo.Location.path. + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Identifies the filesystem path to the original source .proto. + SourceFile *string `protobuf:"bytes,2,opt,name=source_file,json=sourceFile" json:"source_file,omitempty"` + // Identifies the starting offset in bytes in the generated code + // that relates to the identified object. + Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"` + // Identifies the ending offset in bytes in the generated code that + // relates to the identified offset. The end offset should be one past + // the last relevant byte (so the length of the text = end - begin). 
+ End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GeneratedCodeInfo_Annotation) Reset() { *m = GeneratedCodeInfo_Annotation{} } +func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} +func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{20, 0} +} +func (m *GeneratedCodeInfo_Annotation) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Unmarshal(m, b) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Marshal(b, m, deterministic) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Merge(src proto.Message) { + xxx_messageInfo_GeneratedCodeInfo_Annotation.Merge(m, src) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Size() int { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Size(m) +} +func (m *GeneratedCodeInfo_Annotation) XXX_DiscardUnknown() { + xxx_messageInfo_GeneratedCodeInfo_Annotation.DiscardUnknown(m) +} + +var xxx_messageInfo_GeneratedCodeInfo_Annotation proto.InternalMessageInfo + +func (m *GeneratedCodeInfo_Annotation) GetPath() []int32 { + if m != nil { + return m.Path + } + return nil +} + +func (m *GeneratedCodeInfo_Annotation) GetSourceFile() string { + if m != nil && m.SourceFile != nil { + return *m.SourceFile + } + return "" +} + +func (m *GeneratedCodeInfo_Annotation) GetBegin() int32 { + if m != nil && m.Begin != nil { + return *m.Begin + } + return 0 +} + +func (m *GeneratedCodeInfo_Annotation) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +func init() { + proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Type", FieldDescriptorProto_Type_name, FieldDescriptorProto_Type_value) + proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Label", FieldDescriptorProto_Label_name, FieldDescriptorProto_Label_value) + proto.RegisterEnum("google.protobuf.FileOptions_OptimizeMode", FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value) + proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value) + proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value) + proto.RegisterEnum("google.protobuf.MethodOptions_IdempotencyLevel", MethodOptions_IdempotencyLevel_name, MethodOptions_IdempotencyLevel_value) + proto.RegisterType((*FileDescriptorSet)(nil), "google.protobuf.FileDescriptorSet") + proto.RegisterType((*FileDescriptorProto)(nil), "google.protobuf.FileDescriptorProto") + proto.RegisterType((*DescriptorProto)(nil), "google.protobuf.DescriptorProto") + proto.RegisterType((*DescriptorProto_ExtensionRange)(nil), "google.protobuf.DescriptorProto.ExtensionRange") + proto.RegisterType((*DescriptorProto_ReservedRange)(nil), "google.protobuf.DescriptorProto.ReservedRange") + proto.RegisterType((*ExtensionRangeOptions)(nil), "google.protobuf.ExtensionRangeOptions") + proto.RegisterType((*FieldDescriptorProto)(nil), "google.protobuf.FieldDescriptorProto") + proto.RegisterType((*OneofDescriptorProto)(nil), "google.protobuf.OneofDescriptorProto") + proto.RegisterType((*EnumDescriptorProto)(nil), "google.protobuf.EnumDescriptorProto") + proto.RegisterType((*EnumDescriptorProto_EnumReservedRange)(nil), 
"google.protobuf.EnumDescriptorProto.EnumReservedRange") + proto.RegisterType((*EnumValueDescriptorProto)(nil), "google.protobuf.EnumValueDescriptorProto") + proto.RegisterType((*ServiceDescriptorProto)(nil), "google.protobuf.ServiceDescriptorProto") + proto.RegisterType((*MethodDescriptorProto)(nil), "google.protobuf.MethodDescriptorProto") + proto.RegisterType((*FileOptions)(nil), "google.protobuf.FileOptions") + proto.RegisterType((*MessageOptions)(nil), "google.protobuf.MessageOptions") + proto.RegisterType((*FieldOptions)(nil), "google.protobuf.FieldOptions") + proto.RegisterType((*OneofOptions)(nil), "google.protobuf.OneofOptions") + proto.RegisterType((*EnumOptions)(nil), "google.protobuf.EnumOptions") + proto.RegisterType((*EnumValueOptions)(nil), "google.protobuf.EnumValueOptions") + proto.RegisterType((*ServiceOptions)(nil), "google.protobuf.ServiceOptions") + proto.RegisterType((*MethodOptions)(nil), "google.protobuf.MethodOptions") + proto.RegisterType((*UninterpretedOption)(nil), "google.protobuf.UninterpretedOption") + proto.RegisterType((*UninterpretedOption_NamePart)(nil), "google.protobuf.UninterpretedOption.NamePart") + proto.RegisterType((*SourceCodeInfo)(nil), "google.protobuf.SourceCodeInfo") + proto.RegisterType((*SourceCodeInfo_Location)(nil), "google.protobuf.SourceCodeInfo.Location") + proto.RegisterType((*GeneratedCodeInfo)(nil), "google.protobuf.GeneratedCodeInfo") + proto.RegisterType((*GeneratedCodeInfo_Annotation)(nil), "google.protobuf.GeneratedCodeInfo.Annotation") +} + +func init() { proto.RegisterFile("descriptor.proto", fileDescriptor_308767df5ffe18af) } + +var fileDescriptor_308767df5ffe18af = []byte{ + // 2522 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xcd, 0x6f, 0xdb, 0xc8, + 0x15, 0x5f, 0x7d, 0x5a, 0x7a, 0x92, 0x65, 0x7a, 0xec, 0x75, 0x18, 0xef, 0x47, 0x1c, 0xed, 0x66, + 0xe3, 0x24, 0xbb, 0xca, 0xc2, 0x49, 0x9c, 0xac, 0x53, 0x6c, 0x2b, 0x4b, 0x8c, 0x57, 0xa9, 0xbe, + 0x4a, 0xc9, 0xdd, 0x64, 0x8b, 0x82, 0x18, 0x93, 0x23, 0x89, 0x09, 0x45, 0x72, 0x49, 0x2a, 0x89, + 0x83, 0x1e, 0x02, 0xf4, 0x54, 0xa0, 0x7f, 0x40, 0x51, 0x14, 0x3d, 0xf4, 0xb2, 0x40, 0xff, 0x80, + 0x02, 0xed, 0xbd, 0xd7, 0x02, 0xbd, 0xf7, 0x50, 0xa0, 0x05, 0xda, 0x3f, 0xa1, 0xc7, 0x62, 0x66, + 0x48, 0x8a, 0xd4, 0x47, 0xe2, 0x5d, 0x20, 0xd9, 0x93, 0x3d, 0xef, 0xfd, 0xde, 0x9b, 0x37, 0x8f, + 0xbf, 0x79, 0xf3, 0x66, 0x04, 0x82, 0x46, 0x5c, 0xd5, 0xd1, 0x6d, 0xcf, 0x72, 0x2a, 0xb6, 0x63, + 0x79, 0x16, 0x5a, 0x1b, 0x5a, 0xd6, 0xd0, 0x20, 0x7c, 0x74, 0x32, 0x19, 0x94, 0x5b, 0xb0, 0x7e, + 0x4f, 0x37, 0x48, 0x3d, 0x04, 0xf6, 0x88, 0x87, 0xee, 0x40, 0x7a, 0xa0, 0x1b, 0x44, 0x4c, 0xec, + 0xa4, 0x76, 0x0b, 0x7b, 0x1f, 0x56, 0x66, 0x8c, 0x2a, 0x71, 0x8b, 0x2e, 0x15, 0xcb, 0xcc, 0xa2, + 0xfc, 0xef, 0x34, 0x6c, 0x2c, 0xd0, 0x22, 0x04, 0x69, 0x13, 0x8f, 0xa9, 0xc7, 0xc4, 0x6e, 0x5e, + 0x66, 0xff, 0x23, 0x11, 0x56, 0x6c, 0xac, 0x3e, 0xc6, 0x43, 0x22, 0x26, 0x99, 0x38, 0x18, 0xa2, + 0xf7, 0x01, 0x34, 0x62, 0x13, 0x53, 0x23, 0xa6, 0x7a, 0x2a, 0xa6, 0x76, 0x52, 0xbb, 0x79, 0x39, + 0x22, 0x41, 0xd7, 0x60, 0xdd, 0x9e, 0x9c, 0x18, 0xba, 0xaa, 0x44, 0x60, 0xb0, 0x93, 0xda, 0xcd, + 0xc8, 0x02, 0x57, 0xd4, 0xa7, 0xe0, 0xcb, 0xb0, 0xf6, 0x94, 0xe0, 0xc7, 0x51, 0x68, 0x81, 0x41, + 0x4b, 0x54, 0x1c, 0x01, 0xd6, 0xa0, 0x38, 0x26, 0xae, 0x8b, 0x87, 0x44, 0xf1, 0x4e, 0x6d, 0x22, + 0xa6, 0xd9, 0xea, 0x77, 0xe6, 0x56, 0x3f, 0xbb, 0xf2, 0x82, 0x6f, 0xd5, 0x3f, 0xb5, 0x09, 0xaa, + 0x42, 0x9e, 0x98, 0x93, 0x31, 0xf7, 0x90, 0x59, 0x92, 0x3f, 0xc9, 0x9c, 
0x8c, 0x67, 0xbd, 0xe4, + 0xa8, 0x99, 0xef, 0x62, 0xc5, 0x25, 0xce, 0x13, 0x5d, 0x25, 0x62, 0x96, 0x39, 0xb8, 0x3c, 0xe7, + 0xa0, 0xc7, 0xf5, 0xb3, 0x3e, 0x02, 0x3b, 0x54, 0x83, 0x3c, 0x79, 0xe6, 0x11, 0xd3, 0xd5, 0x2d, + 0x53, 0x5c, 0x61, 0x4e, 0x2e, 0x2d, 0xf8, 0x8a, 0xc4, 0xd0, 0x66, 0x5d, 0x4c, 0xed, 0xd0, 0x3e, + 0xac, 0x58, 0xb6, 0xa7, 0x5b, 0xa6, 0x2b, 0xe6, 0x76, 0x12, 0xbb, 0x85, 0xbd, 0x77, 0x17, 0x12, + 0xa1, 0xc3, 0x31, 0x72, 0x00, 0x46, 0x0d, 0x10, 0x5c, 0x6b, 0xe2, 0xa8, 0x44, 0x51, 0x2d, 0x8d, + 0x28, 0xba, 0x39, 0xb0, 0xc4, 0x3c, 0x73, 0x70, 0x61, 0x7e, 0x21, 0x0c, 0x58, 0xb3, 0x34, 0xd2, + 0x30, 0x07, 0x96, 0x5c, 0x72, 0x63, 0x63, 0xb4, 0x05, 0x59, 0xf7, 0xd4, 0xf4, 0xf0, 0x33, 0xb1, + 0xc8, 0x18, 0xe2, 0x8f, 0xca, 0x7f, 0xce, 0xc2, 0xda, 0x59, 0x28, 0x76, 0x17, 0x32, 0x03, 0xba, + 0x4a, 0x31, 0xf9, 0x6d, 0x72, 0xc0, 0x6d, 0xe2, 0x49, 0xcc, 0x7e, 0xc7, 0x24, 0x56, 0xa1, 0x60, + 0x12, 0xd7, 0x23, 0x1a, 0x67, 0x44, 0xea, 0x8c, 0x9c, 0x02, 0x6e, 0x34, 0x4f, 0xa9, 0xf4, 0x77, + 0xa2, 0xd4, 0x03, 0x58, 0x0b, 0x43, 0x52, 0x1c, 0x6c, 0x0e, 0x03, 0x6e, 0x5e, 0x7f, 0x55, 0x24, + 0x15, 0x29, 0xb0, 0x93, 0xa9, 0x99, 0x5c, 0x22, 0xb1, 0x31, 0xaa, 0x03, 0x58, 0x26, 0xb1, 0x06, + 0x8a, 0x46, 0x54, 0x43, 0xcc, 0x2d, 0xc9, 0x52, 0x87, 0x42, 0xe6, 0xb2, 0x64, 0x71, 0xa9, 0x6a, + 0xa0, 0xcf, 0xa6, 0x54, 0x5b, 0x59, 0xc2, 0x94, 0x16, 0xdf, 0x64, 0x73, 0x6c, 0x3b, 0x86, 0x92, + 0x43, 0x28, 0xef, 0x89, 0xe6, 0xaf, 0x2c, 0xcf, 0x82, 0xa8, 0xbc, 0x72, 0x65, 0xb2, 0x6f, 0xc6, + 0x17, 0xb6, 0xea, 0x44, 0x87, 0xe8, 0x03, 0x08, 0x05, 0x0a, 0xa3, 0x15, 0xb0, 0x2a, 0x54, 0x0c, + 0x84, 0x6d, 0x3c, 0x26, 0xdb, 0xcf, 0xa1, 0x14, 0x4f, 0x0f, 0xda, 0x84, 0x8c, 0xeb, 0x61, 0xc7, + 0x63, 0x2c, 0xcc, 0xc8, 0x7c, 0x80, 0x04, 0x48, 0x11, 0x53, 0x63, 0x55, 0x2e, 0x23, 0xd3, 0x7f, + 0xd1, 0x8f, 0xa6, 0x0b, 0x4e, 0xb1, 0x05, 0x7f, 0x34, 0xff, 0x45, 0x63, 0x9e, 0x67, 0xd7, 0xbd, + 0x7d, 0x1b, 0x56, 0x63, 0x0b, 0x38, 0xeb, 0xd4, 0xe5, 0x5f, 0xc0, 0xdb, 0x0b, 0x5d, 0xa3, 0x07, + 0xb0, 0x39, 0x31, 0x75, 0xd3, 0x23, 0x8e, 0xed, 0x10, 0xca, 0x58, 0x3e, 0x95, 0xf8, 0x9f, 0x95, + 0x25, 0x9c, 0x3b, 0x8e, 0xa2, 0xb9, 0x17, 0x79, 0x63, 0x32, 0x2f, 0xbc, 0x9a, 0xcf, 0xfd, 0x77, + 0x45, 0x78, 0xf1, 0xe2, 0xc5, 0x8b, 0x64, 0xf9, 0x37, 0x59, 0xd8, 0x5c, 0xb4, 0x67, 0x16, 0x6e, + 0xdf, 0x2d, 0xc8, 0x9a, 0x93, 0xf1, 0x09, 0x71, 0x58, 0x92, 0x32, 0xb2, 0x3f, 0x42, 0x55, 0xc8, + 0x18, 0xf8, 0x84, 0x18, 0x62, 0x7a, 0x27, 0xb1, 0x5b, 0xda, 0xbb, 0x76, 0xa6, 0x5d, 0x59, 0x69, + 0x52, 0x13, 0x99, 0x5b, 0xa2, 0xcf, 0x21, 0xed, 0x97, 0x68, 0xea, 0xe1, 0xea, 0xd9, 0x3c, 0xd0, + 0xbd, 0x24, 0x33, 0x3b, 0xf4, 0x0e, 0xe4, 0xe9, 0x5f, 0xce, 0x8d, 0x2c, 0x8b, 0x39, 0x47, 0x05, + 0x94, 0x17, 0x68, 0x1b, 0x72, 0x6c, 0x9b, 0x68, 0x24, 0x38, 0xda, 0xc2, 0x31, 0x25, 0x96, 0x46, + 0x06, 0x78, 0x62, 0x78, 0xca, 0x13, 0x6c, 0x4c, 0x08, 0x23, 0x7c, 0x5e, 0x2e, 0xfa, 0xc2, 0x9f, + 0x52, 0x19, 0xba, 0x00, 0x05, 0xbe, 0xab, 0x74, 0x53, 0x23, 0xcf, 0x58, 0xf5, 0xcc, 0xc8, 0x7c, + 0xa3, 0x35, 0xa8, 0x84, 0x4e, 0xff, 0xc8, 0xb5, 0xcc, 0x80, 0x9a, 0x6c, 0x0a, 0x2a, 0x60, 0xd3, + 0xdf, 0x9e, 0x2d, 0xdc, 0xef, 0x2d, 0x5e, 0xde, 0x2c, 0xa7, 0xca, 0x7f, 0x4a, 0x42, 0x9a, 0xd5, + 0x8b, 0x35, 0x28, 0xf4, 0x1f, 0x76, 0x25, 0xa5, 0xde, 0x39, 0x3e, 0x6c, 0x4a, 0x42, 0x02, 0x95, + 0x00, 0x98, 0xe0, 0x5e, 0xb3, 0x53, 0xed, 0x0b, 0xc9, 0x70, 0xdc, 0x68, 0xf7, 0xf7, 0x6f, 0x0a, + 0xa9, 0xd0, 0xe0, 0x98, 0x0b, 0xd2, 0x51, 0xc0, 0x8d, 0x3d, 0x21, 0x83, 0x04, 0x28, 0x72, 0x07, + 0x8d, 0x07, 0x52, 0x7d, 0xff, 0xa6, 0x90, 0x8d, 0x4b, 0x6e, 0xec, 0x09, 0x2b, 0x68, 0x15, 0xf2, + 
0x4c, 0x72, 0xd8, 0xe9, 0x34, 0x85, 0x5c, 0xe8, 0xb3, 0xd7, 0x97, 0x1b, 0xed, 0x23, 0x21, 0x1f, + 0xfa, 0x3c, 0x92, 0x3b, 0xc7, 0x5d, 0x01, 0x42, 0x0f, 0x2d, 0xa9, 0xd7, 0xab, 0x1e, 0x49, 0x42, + 0x21, 0x44, 0x1c, 0x3e, 0xec, 0x4b, 0x3d, 0xa1, 0x18, 0x0b, 0xeb, 0xc6, 0x9e, 0xb0, 0x1a, 0x4e, + 0x21, 0xb5, 0x8f, 0x5b, 0x42, 0x09, 0xad, 0xc3, 0x2a, 0x9f, 0x22, 0x08, 0x62, 0x6d, 0x46, 0xb4, + 0x7f, 0x53, 0x10, 0xa6, 0x81, 0x70, 0x2f, 0xeb, 0x31, 0xc1, 0xfe, 0x4d, 0x01, 0x95, 0x6b, 0x90, + 0x61, 0xec, 0x42, 0x08, 0x4a, 0xcd, 0xea, 0xa1, 0xd4, 0x54, 0x3a, 0xdd, 0x7e, 0xa3, 0xd3, 0xae, + 0x36, 0x85, 0xc4, 0x54, 0x26, 0x4b, 0x3f, 0x39, 0x6e, 0xc8, 0x52, 0x5d, 0x48, 0x46, 0x65, 0x5d, + 0xa9, 0xda, 0x97, 0xea, 0x42, 0xaa, 0xac, 0xc2, 0xe6, 0xa2, 0x3a, 0xb9, 0x70, 0x67, 0x44, 0x3e, + 0x71, 0x72, 0xc9, 0x27, 0x66, 0xbe, 0xe6, 0x3e, 0xf1, 0xbf, 0x92, 0xb0, 0xb1, 0xe0, 0xac, 0x58, + 0x38, 0xc9, 0x0f, 0x21, 0xc3, 0x29, 0xca, 0x4f, 0xcf, 0x2b, 0x0b, 0x0f, 0x1d, 0x46, 0xd8, 0xb9, + 0x13, 0x94, 0xd9, 0x45, 0x3b, 0x88, 0xd4, 0x92, 0x0e, 0x82, 0xba, 0x98, 0xab, 0xe9, 0x3f, 0x9f, + 0xab, 0xe9, 0xfc, 0xd8, 0xdb, 0x3f, 0xcb, 0xb1, 0xc7, 0x64, 0xdf, 0xae, 0xb6, 0x67, 0x16, 0xd4, + 0xf6, 0xbb, 0xb0, 0x3e, 0xe7, 0xe8, 0xcc, 0x35, 0xf6, 0x97, 0x09, 0x10, 0x97, 0x25, 0xe7, 0x15, + 0x95, 0x2e, 0x19, 0xab, 0x74, 0x77, 0x67, 0x33, 0x78, 0x71, 0xf9, 0x47, 0x98, 0xfb, 0xd6, 0xdf, + 0x24, 0x60, 0x6b, 0x71, 0xa7, 0xb8, 0x30, 0x86, 0xcf, 0x21, 0x3b, 0x26, 0xde, 0xc8, 0x0a, 0xba, + 0xa5, 0x8f, 0x16, 0x9c, 0xc1, 0x54, 0x3d, 0xfb, 0xb1, 0x7d, 0xab, 0xe8, 0x21, 0x9e, 0x5a, 0xd6, + 0xee, 0xf1, 0x68, 0xe6, 0x22, 0xfd, 0x55, 0x12, 0xde, 0x5e, 0xe8, 0x7c, 0x61, 0xa0, 0xef, 0x01, + 0xe8, 0xa6, 0x3d, 0xf1, 0x78, 0x47, 0xc4, 0x0b, 0x6c, 0x9e, 0x49, 0x58, 0xf1, 0xa2, 0xc5, 0x73, + 0xe2, 0x85, 0xfa, 0x14, 0xd3, 0x03, 0x17, 0x31, 0xc0, 0x9d, 0x69, 0xa0, 0x69, 0x16, 0xe8, 0xfb, + 0x4b, 0x56, 0x3a, 0x47, 0xcc, 0x4f, 0x41, 0x50, 0x0d, 0x9d, 0x98, 0x9e, 0xe2, 0x7a, 0x0e, 0xc1, + 0x63, 0xdd, 0x1c, 0xb2, 0x13, 0x24, 0x77, 0x90, 0x19, 0x60, 0xc3, 0x25, 0xf2, 0x1a, 0x57, 0xf7, + 0x02, 0x2d, 0xb5, 0x60, 0x04, 0x72, 0x22, 0x16, 0xd9, 0x98, 0x05, 0x57, 0x87, 0x16, 0xe5, 0x5f, + 0xe7, 0xa1, 0x10, 0xe9, 0xab, 0xd1, 0x45, 0x28, 0x3e, 0xc2, 0x4f, 0xb0, 0x12, 0xdc, 0x95, 0x78, + 0x26, 0x0a, 0x54, 0xd6, 0xf5, 0xef, 0x4b, 0x9f, 0xc2, 0x26, 0x83, 0x58, 0x13, 0x8f, 0x38, 0x8a, + 0x6a, 0x60, 0xd7, 0x65, 0x49, 0xcb, 0x31, 0x28, 0xa2, 0xba, 0x0e, 0x55, 0xd5, 0x02, 0x0d, 0xba, + 0x05, 0x1b, 0xcc, 0x62, 0x3c, 0x31, 0x3c, 0xdd, 0x36, 0x88, 0x42, 0x6f, 0x6f, 0x2e, 0x3b, 0x49, + 0xc2, 0xc8, 0xd6, 0x29, 0xa2, 0xe5, 0x03, 0x68, 0x44, 0x2e, 0xaa, 0xc3, 0x7b, 0xcc, 0x6c, 0x48, + 0x4c, 0xe2, 0x60, 0x8f, 0x28, 0xe4, 0xeb, 0x09, 0x36, 0x5c, 0x05, 0x9b, 0x9a, 0x32, 0xc2, 0xee, + 0x48, 0xdc, 0xa4, 0x0e, 0x0e, 0x93, 0x62, 0x42, 0x3e, 0x4f, 0x81, 0x47, 0x3e, 0x4e, 0x62, 0xb0, + 0xaa, 0xa9, 0x7d, 0x81, 0xdd, 0x11, 0x3a, 0x80, 0x2d, 0xe6, 0xc5, 0xf5, 0x1c, 0xdd, 0x1c, 0x2a, + 0xea, 0x88, 0xa8, 0x8f, 0x95, 0x89, 0x37, 0xb8, 0x23, 0xbe, 0x13, 0x9d, 0x9f, 0x45, 0xd8, 0x63, + 0x98, 0x1a, 0x85, 0x1c, 0x7b, 0x83, 0x3b, 0xa8, 0x07, 0x45, 0xfa, 0x31, 0xc6, 0xfa, 0x73, 0xa2, + 0x0c, 0x2c, 0x87, 0x1d, 0x8d, 0xa5, 0x05, 0xa5, 0x29, 0x92, 0xc1, 0x4a, 0xc7, 0x37, 0x68, 0x59, + 0x1a, 0x39, 0xc8, 0xf4, 0xba, 0x92, 0x54, 0x97, 0x0b, 0x81, 0x97, 0x7b, 0x96, 0x43, 0x09, 0x35, + 0xb4, 0xc2, 0x04, 0x17, 0x38, 0xa1, 0x86, 0x56, 0x90, 0xde, 0x5b, 0xb0, 0xa1, 0xaa, 0x7c, 0xcd, + 0xba, 0xaa, 0xf8, 0x77, 0x2c, 0x57, 0x14, 0x62, 0xc9, 0x52, 0xd5, 0x23, 0x0e, 0xf0, 0x39, 0xee, + 0xa2, 0xcf, 0xe0, 0xed, 
0x69, 0xb2, 0xa2, 0x86, 0xeb, 0x73, 0xab, 0x9c, 0x35, 0xbd, 0x05, 0x1b, + 0xf6, 0xe9, 0xbc, 0x21, 0x8a, 0xcd, 0x68, 0x9f, 0xce, 0x9a, 0xdd, 0x86, 0x4d, 0x7b, 0x64, 0xcf, + 0xdb, 0x5d, 0x8d, 0xda, 0x21, 0x7b, 0x64, 0xcf, 0x1a, 0x5e, 0x62, 0x17, 0x6e, 0x87, 0xa8, 0xd8, + 0x23, 0x9a, 0x78, 0x2e, 0x0a, 0x8f, 0x28, 0xd0, 0x75, 0x10, 0x54, 0x55, 0x21, 0x26, 0x3e, 0x31, + 0x88, 0x82, 0x1d, 0x62, 0x62, 0x57, 0xbc, 0x10, 0x05, 0x97, 0x54, 0x55, 0x62, 0xda, 0x2a, 0x53, + 0xa2, 0xab, 0xb0, 0x6e, 0x9d, 0x3c, 0x52, 0x39, 0x25, 0x15, 0xdb, 0x21, 0x03, 0xfd, 0x99, 0xf8, + 0x21, 0xcb, 0xef, 0x1a, 0x55, 0x30, 0x42, 0x76, 0x99, 0x18, 0x5d, 0x01, 0x41, 0x75, 0x47, 0xd8, + 0xb1, 0x59, 0x4d, 0x76, 0x6d, 0xac, 0x12, 0xf1, 0x12, 0x87, 0x72, 0x79, 0x3b, 0x10, 0xd3, 0x2d, + 0xe1, 0x3e, 0xd5, 0x07, 0x5e, 0xe0, 0xf1, 0x32, 0xdf, 0x12, 0x4c, 0xe6, 0x7b, 0xdb, 0x05, 0x81, + 0xa6, 0x22, 0x36, 0xf1, 0x2e, 0x83, 0x95, 0xec, 0x91, 0x1d, 0x9d, 0xf7, 0x03, 0x58, 0xa5, 0xc8, + 0xe9, 0xa4, 0x57, 0x78, 0x43, 0x66, 0x8f, 0x22, 0x33, 0xde, 0x84, 0x2d, 0x0a, 0x1a, 0x13, 0x0f, + 0x6b, 0xd8, 0xc3, 0x11, 0xf4, 0xc7, 0x0c, 0x4d, 0xf3, 0xde, 0xf2, 0x95, 0xb1, 0x38, 0x9d, 0xc9, + 0xc9, 0x69, 0xc8, 0xac, 0x4f, 0x78, 0x9c, 0x54, 0x16, 0x70, 0xeb, 0xb5, 0x35, 0xdd, 0xe5, 0x03, + 0x28, 0x46, 0x89, 0x8f, 0xf2, 0xc0, 0xa9, 0x2f, 0x24, 0x68, 0x17, 0x54, 0xeb, 0xd4, 0x69, 0xff, + 0xf2, 0x95, 0x24, 0x24, 0x69, 0x1f, 0xd5, 0x6c, 0xf4, 0x25, 0x45, 0x3e, 0x6e, 0xf7, 0x1b, 0x2d, + 0x49, 0x48, 0x45, 0x1b, 0xf6, 0xbf, 0x26, 0xa1, 0x14, 0xbf, 0x7b, 0xa1, 0x1f, 0xc0, 0xb9, 0xe0, + 0xa1, 0xc4, 0x25, 0x9e, 0xf2, 0x54, 0x77, 0xd8, 0x5e, 0x1c, 0x63, 0x7e, 0x2e, 0x86, 0x6c, 0xd8, + 0xf4, 0x51, 0x3d, 0xe2, 0x7d, 0xa9, 0x3b, 0x74, 0xa7, 0x8d, 0xb1, 0x87, 0x9a, 0x70, 0xc1, 0xb4, + 0x14, 0xd7, 0xc3, 0xa6, 0x86, 0x1d, 0x4d, 0x99, 0x3e, 0x51, 0x29, 0x58, 0x55, 0x89, 0xeb, 0x5a, + 0xfc, 0x0c, 0x0c, 0xbd, 0xbc, 0x6b, 0x5a, 0x3d, 0x1f, 0x3c, 0x3d, 0x1c, 0xaa, 0x3e, 0x74, 0x86, + 0xb9, 0xa9, 0x65, 0xcc, 0x7d, 0x07, 0xf2, 0x63, 0x6c, 0x2b, 0xc4, 0xf4, 0x9c, 0x53, 0xd6, 0x71, + 0xe7, 0xe4, 0xdc, 0x18, 0xdb, 0x12, 0x1d, 0xbf, 0x99, 0x8b, 0xcf, 0x3f, 0x52, 0x50, 0x8c, 0x76, + 0xdd, 0xf4, 0x12, 0xa3, 0xb2, 0x03, 0x2a, 0xc1, 0x4a, 0xd8, 0x07, 0x2f, 0xed, 0xd1, 0x2b, 0x35, + 0x7a, 0x72, 0x1d, 0x64, 0x79, 0x2f, 0x2c, 0x73, 0x4b, 0xda, 0x35, 0x50, 0x6a, 0x11, 0xde, 0x7b, + 0xe4, 0x64, 0x7f, 0x84, 0x8e, 0x20, 0xfb, 0xc8, 0x65, 0xbe, 0xb3, 0xcc, 0xf7, 0x87, 0x2f, 0xf7, + 0x7d, 0xbf, 0xc7, 0x9c, 0xe7, 0xef, 0xf7, 0x94, 0x76, 0x47, 0x6e, 0x55, 0x9b, 0xb2, 0x6f, 0x8e, + 0xce, 0x43, 0xda, 0xc0, 0xcf, 0x4f, 0xe3, 0x67, 0x1c, 0x13, 0x9d, 0x35, 0xf1, 0xe7, 0x21, 0xfd, + 0x94, 0xe0, 0xc7, 0xf1, 0x93, 0x85, 0x89, 0x5e, 0x23, 0xf5, 0xaf, 0x43, 0x86, 0xe5, 0x0b, 0x01, + 0xf8, 0x19, 0x13, 0xde, 0x42, 0x39, 0x48, 0xd7, 0x3a, 0x32, 0xa5, 0xbf, 0x00, 0x45, 0x2e, 0x55, + 0xba, 0x0d, 0xa9, 0x26, 0x09, 0xc9, 0xf2, 0x2d, 0xc8, 0xf2, 0x24, 0xd0, 0xad, 0x11, 0xa6, 0x41, + 0x78, 0xcb, 0x1f, 0xfa, 0x3e, 0x12, 0x81, 0xf6, 0xb8, 0x75, 0x28, 0xc9, 0x42, 0x32, 0xfa, 0x79, + 0x5d, 0x28, 0x46, 0x1b, 0xee, 0x37, 0xc3, 0xa9, 0xbf, 0x24, 0xa0, 0x10, 0x69, 0xa0, 0x69, 0xe7, + 0x83, 0x0d, 0xc3, 0x7a, 0xaa, 0x60, 0x43, 0xc7, 0xae, 0x4f, 0x0a, 0x60, 0xa2, 0x2a, 0x95, 0x9c, + 0xf5, 0xa3, 0xbd, 0x91, 0xe0, 0x7f, 0x9f, 0x00, 0x61, 0xb6, 0x77, 0x9d, 0x09, 0x30, 0xf1, 0xbd, + 0x06, 0xf8, 0xbb, 0x04, 0x94, 0xe2, 0x0d, 0xeb, 0x4c, 0x78, 0x17, 0xbf, 0xd7, 0xf0, 0xfe, 0x99, + 0x84, 0xd5, 0x58, 0x9b, 0x7a, 0xd6, 0xe8, 0xbe, 0x86, 0x75, 0x5d, 0x23, 0x63, 0xdb, 0xf2, 0x88, + 0xa9, 0x9e, 0x2a, 0x06, 0x79, 0x42, 0x0c, 0xb1, 
0xcc, 0x0a, 0xc5, 0xf5, 0x97, 0x37, 0xc2, 0x95, + 0xc6, 0xd4, 0xae, 0x49, 0xcd, 0x0e, 0x36, 0x1a, 0x75, 0xa9, 0xd5, 0xed, 0xf4, 0xa5, 0x76, 0xed, + 0xa1, 0x72, 0xdc, 0xfe, 0x71, 0xbb, 0xf3, 0x65, 0x5b, 0x16, 0xf4, 0x19, 0xd8, 0x6b, 0xdc, 0xea, + 0x5d, 0x10, 0x66, 0x83, 0x42, 0xe7, 0x60, 0x51, 0x58, 0xc2, 0x5b, 0x68, 0x03, 0xd6, 0xda, 0x1d, + 0xa5, 0xd7, 0xa8, 0x4b, 0x8a, 0x74, 0xef, 0x9e, 0x54, 0xeb, 0xf7, 0xf8, 0xd3, 0x46, 0x88, 0xee, + 0xc7, 0x37, 0xf5, 0x6f, 0x53, 0xb0, 0xb1, 0x20, 0x12, 0x54, 0xf5, 0x2f, 0x25, 0xfc, 0x9e, 0xf4, + 0xc9, 0x59, 0xa2, 0xaf, 0xd0, 0xae, 0xa0, 0x8b, 0x1d, 0xcf, 0xbf, 0xc3, 0x5c, 0x01, 0x9a, 0x25, + 0xd3, 0xd3, 0x07, 0x3a, 0x71, 0xfc, 0x97, 0x20, 0x7e, 0x53, 0x59, 0x9b, 0xca, 0xf9, 0x63, 0xd0, + 0xc7, 0x80, 0x6c, 0xcb, 0xd5, 0x3d, 0xfd, 0x09, 0x51, 0x74, 0x33, 0x78, 0x36, 0xa2, 0x37, 0x97, + 0xb4, 0x2c, 0x04, 0x9a, 0x86, 0xe9, 0x85, 0x68, 0x93, 0x0c, 0xf1, 0x0c, 0x9a, 0x16, 0xf0, 0x94, + 0x2c, 0x04, 0x9a, 0x10, 0x7d, 0x11, 0x8a, 0x9a, 0x35, 0xa1, 0xed, 0x1c, 0xc7, 0xd1, 0xf3, 0x22, + 0x21, 0x17, 0xb8, 0x2c, 0x84, 0xf8, 0x8d, 0xfa, 0xf4, 0xbd, 0xaa, 0x28, 0x17, 0xb8, 0x8c, 0x43, + 0x2e, 0xc3, 0x1a, 0x1e, 0x0e, 0x1d, 0xea, 0x3c, 0x70, 0xc4, 0xaf, 0x1e, 0xa5, 0x50, 0xcc, 0x80, + 0xdb, 0xf7, 0x21, 0x17, 0xe4, 0x81, 0x1e, 0xc9, 0x34, 0x13, 0x8a, 0xcd, 0xef, 0xd3, 0xc9, 0xdd, + 0xbc, 0x9c, 0x33, 0x03, 0xe5, 0x45, 0x28, 0xea, 0xae, 0x32, 0x7d, 0x7e, 0x4f, 0xee, 0x24, 0x77, + 0x73, 0x72, 0x41, 0x77, 0xc3, 0xa7, 0xcb, 0xf2, 0x37, 0x49, 0x28, 0xc5, 0x7f, 0x3e, 0x40, 0x75, + 0xc8, 0x19, 0x96, 0x8a, 0x19, 0xb5, 0xf8, 0x6f, 0x57, 0xbb, 0xaf, 0xf8, 0xc5, 0xa1, 0xd2, 0xf4, + 0xf1, 0x72, 0x68, 0xb9, 0xfd, 0xb7, 0x04, 0xe4, 0x02, 0x31, 0xda, 0x82, 0xb4, 0x8d, 0xbd, 0x11, + 0x73, 0x97, 0x39, 0x4c, 0x0a, 0x09, 0x99, 0x8d, 0xa9, 0xdc, 0xb5, 0xb1, 0xc9, 0x28, 0xe0, 0xcb, + 0xe9, 0x98, 0x7e, 0x57, 0x83, 0x60, 0x8d, 0xdd, 0x6b, 0xac, 0xf1, 0x98, 0x98, 0x9e, 0x1b, 0x7c, + 0x57, 0x5f, 0x5e, 0xf3, 0xc5, 0xe8, 0x1a, 0xac, 0x7b, 0x0e, 0xd6, 0x8d, 0x18, 0x36, 0xcd, 0xb0, + 0x42, 0xa0, 0x08, 0xc1, 0x07, 0x70, 0x3e, 0xf0, 0xab, 0x11, 0x0f, 0xab, 0x23, 0xa2, 0x4d, 0x8d, + 0xb2, 0xec, 0xfd, 0xe2, 0x9c, 0x0f, 0xa8, 0xfb, 0xfa, 0xc0, 0xb6, 0xfc, 0xf7, 0x04, 0xac, 0x07, + 0x37, 0x31, 0x2d, 0x4c, 0x56, 0x0b, 0x00, 0x9b, 0xa6, 0xe5, 0x45, 0xd3, 0x35, 0x4f, 0xe5, 0x39, + 0xbb, 0x4a, 0x35, 0x34, 0x92, 0x23, 0x0e, 0xb6, 0xc7, 0x00, 0x53, 0xcd, 0xd2, 0xb4, 0x5d, 0x80, + 0x82, 0xff, 0xdb, 0x10, 0xfb, 0x81, 0x91, 0xdf, 0xdd, 0x81, 0x8b, 0xe8, 0x95, 0x0d, 0x6d, 0x42, + 0xe6, 0x84, 0x0c, 0x75, 0xd3, 0x7f, 0xf1, 0xe5, 0x83, 0xe0, 0x85, 0x25, 0x1d, 0xbe, 0xb0, 0x1c, + 0xfe, 0x0c, 0x36, 0x54, 0x6b, 0x3c, 0x1b, 0xee, 0xa1, 0x30, 0xf3, 0x7e, 0xe0, 0x7e, 0x91, 0xf8, + 0x0a, 0xa6, 0x2d, 0xe6, 0xff, 0x12, 0x89, 0x3f, 0x24, 0x53, 0x47, 0xdd, 0xc3, 0x3f, 0x26, 0xb7, + 0x8f, 0xb8, 0x69, 0x37, 0x58, 0xa9, 0x4c, 0x06, 0x06, 0x51, 0x69, 0xf4, 0xff, 0x0f, 0x00, 0x00, + 0xff, 0xff, 0x88, 0x17, 0xc1, 0xbe, 0x38, 0x1d, 0x00, 0x00, +} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go new file mode 100644 index 000000000..165b2110d --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go @@ -0,0 +1,752 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: descriptor.proto + +package descriptor + +import ( + fmt "fmt" + github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto" + proto "github.com/gogo/protobuf/proto" + math "math" + reflect "reflect" + sort "sort" + strconv "strconv" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +func (this *FileDescriptorSet) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.FileDescriptorSet{") + if this.File != nil { + s = append(s, "File: "+fmt.Sprintf("%#v", this.File)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FileDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 16) + s = append(s, "&descriptor.FileDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Package != nil { + s = append(s, "Package: "+valueToGoStringDescriptor(this.Package, "string")+",\n") + } + if this.Dependency != nil { + s = append(s, "Dependency: "+fmt.Sprintf("%#v", this.Dependency)+",\n") + } + if this.PublicDependency != nil { + s = append(s, "PublicDependency: "+fmt.Sprintf("%#v", this.PublicDependency)+",\n") + } + if this.WeakDependency != nil { + s = append(s, "WeakDependency: "+fmt.Sprintf("%#v", this.WeakDependency)+",\n") + } + if this.MessageType != nil { + s = append(s, "MessageType: "+fmt.Sprintf("%#v", this.MessageType)+",\n") + } + if this.EnumType != nil { + s = append(s, "EnumType: "+fmt.Sprintf("%#v", this.EnumType)+",\n") + } + if this.Service != nil { + s = append(s, "Service: "+fmt.Sprintf("%#v", this.Service)+",\n") + } + if this.Extension != nil { + s = append(s, "Extension: "+fmt.Sprintf("%#v", this.Extension)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.SourceCodeInfo != nil { + s = append(s, "SourceCodeInfo: "+fmt.Sprintf("%#v", this.SourceCodeInfo)+",\n") + } + if this.Syntax != nil { + s = append(s, "Syntax: "+valueToGoStringDescriptor(this.Syntax, "string")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 14) + s = append(s, "&descriptor.DescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Field != nil { + s = append(s, "Field: "+fmt.Sprintf("%#v", this.Field)+",\n") + } + if this.Extension != nil { + s = append(s, "Extension: "+fmt.Sprintf("%#v", this.Extension)+",\n") + } + if this.NestedType != nil { + s = append(s, "NestedType: "+fmt.Sprintf("%#v", this.NestedType)+",\n") + } + if this.EnumType != nil { + s = append(s, "EnumType: "+fmt.Sprintf("%#v", this.EnumType)+",\n") + } + if this.ExtensionRange != nil { + s = append(s, "ExtensionRange: "+fmt.Sprintf("%#v", this.ExtensionRange)+",\n") + } + if this.OneofDecl != nil { + s = append(s, "OneofDecl: "+fmt.Sprintf("%#v", this.OneofDecl)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.ReservedRange != 
nil { + s = append(s, "ReservedRange: "+fmt.Sprintf("%#v", this.ReservedRange)+",\n") + } + if this.ReservedName != nil { + s = append(s, "ReservedName: "+fmt.Sprintf("%#v", this.ReservedName)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DescriptorProto_ExtensionRange) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.DescriptorProto_ExtensionRange{") + if this.Start != nil { + s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DescriptorProto_ReservedRange) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.DescriptorProto_ReservedRange{") + if this.Start != nil { + s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ExtensionRangeOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.ExtensionRangeOptions{") + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FieldDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 14) + s = append(s, "&descriptor.FieldDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Number != nil { + s = append(s, "Number: "+valueToGoStringDescriptor(this.Number, "int32")+",\n") + } + if this.Label != nil { + s = append(s, "Label: "+valueToGoStringDescriptor(this.Label, "FieldDescriptorProto_Label")+",\n") + } + if this.Type != nil { + s = append(s, "Type: "+valueToGoStringDescriptor(this.Type, "FieldDescriptorProto_Type")+",\n") + } + if this.TypeName != nil { + s = append(s, "TypeName: "+valueToGoStringDescriptor(this.TypeName, "string")+",\n") + } + if this.Extendee != nil { + s = append(s, "Extendee: "+valueToGoStringDescriptor(this.Extendee, "string")+",\n") + } + if this.DefaultValue != nil { + s = append(s, "DefaultValue: "+valueToGoStringDescriptor(this.DefaultValue, "string")+",\n") + } + if this.OneofIndex != nil { + s = append(s, "OneofIndex: "+valueToGoStringDescriptor(this.OneofIndex, "int32")+",\n") + } + if this.JsonName != nil { + s = append(s, "JsonName: "+valueToGoStringDescriptor(this.JsonName, "string")+",\n") + } + if this.Options != nil { + s = append(s, "Options: 
"+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *OneofDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.OneofDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&descriptor.EnumDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Value != nil { + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.ReservedRange != nil { + s = append(s, "ReservedRange: "+fmt.Sprintf("%#v", this.ReservedRange)+",\n") + } + if this.ReservedName != nil { + s = append(s, "ReservedName: "+fmt.Sprintf("%#v", this.ReservedName)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumDescriptorProto_EnumReservedRange) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.EnumDescriptorProto_EnumReservedRange{") + if this.Start != nil { + s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumValueDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.EnumValueDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Number != nil { + s = append(s, "Number: "+valueToGoStringDescriptor(this.Number, "int32")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ServiceDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.ServiceDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Method != nil { + s = append(s, "Method: "+fmt.Sprintf("%#v", this.Method)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = 
append(s, "}") + return strings.Join(s, "") +} +func (this *MethodDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 10) + s = append(s, "&descriptor.MethodDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.InputType != nil { + s = append(s, "InputType: "+valueToGoStringDescriptor(this.InputType, "string")+",\n") + } + if this.OutputType != nil { + s = append(s, "OutputType: "+valueToGoStringDescriptor(this.OutputType, "string")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.ClientStreaming != nil { + s = append(s, "ClientStreaming: "+valueToGoStringDescriptor(this.ClientStreaming, "bool")+",\n") + } + if this.ServerStreaming != nil { + s = append(s, "ServerStreaming: "+valueToGoStringDescriptor(this.ServerStreaming, "bool")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FileOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 25) + s = append(s, "&descriptor.FileOptions{") + if this.JavaPackage != nil { + s = append(s, "JavaPackage: "+valueToGoStringDescriptor(this.JavaPackage, "string")+",\n") + } + if this.JavaOuterClassname != nil { + s = append(s, "JavaOuterClassname: "+valueToGoStringDescriptor(this.JavaOuterClassname, "string")+",\n") + } + if this.JavaMultipleFiles != nil { + s = append(s, "JavaMultipleFiles: "+valueToGoStringDescriptor(this.JavaMultipleFiles, "bool")+",\n") + } + if this.JavaGenerateEqualsAndHash != nil { + s = append(s, "JavaGenerateEqualsAndHash: "+valueToGoStringDescriptor(this.JavaGenerateEqualsAndHash, "bool")+",\n") + } + if this.JavaStringCheckUtf8 != nil { + s = append(s, "JavaStringCheckUtf8: "+valueToGoStringDescriptor(this.JavaStringCheckUtf8, "bool")+",\n") + } + if this.OptimizeFor != nil { + s = append(s, "OptimizeFor: "+valueToGoStringDescriptor(this.OptimizeFor, "FileOptions_OptimizeMode")+",\n") + } + if this.GoPackage != nil { + s = append(s, "GoPackage: "+valueToGoStringDescriptor(this.GoPackage, "string")+",\n") + } + if this.CcGenericServices != nil { + s = append(s, "CcGenericServices: "+valueToGoStringDescriptor(this.CcGenericServices, "bool")+",\n") + } + if this.JavaGenericServices != nil { + s = append(s, "JavaGenericServices: "+valueToGoStringDescriptor(this.JavaGenericServices, "bool")+",\n") + } + if this.PyGenericServices != nil { + s = append(s, "PyGenericServices: "+valueToGoStringDescriptor(this.PyGenericServices, "bool")+",\n") + } + if this.PhpGenericServices != nil { + s = append(s, "PhpGenericServices: "+valueToGoStringDescriptor(this.PhpGenericServices, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.CcEnableArenas != nil { + s = append(s, "CcEnableArenas: "+valueToGoStringDescriptor(this.CcEnableArenas, "bool")+",\n") + } + if this.ObjcClassPrefix != nil { + s = append(s, "ObjcClassPrefix: "+valueToGoStringDescriptor(this.ObjcClassPrefix, "string")+",\n") + } + if this.CsharpNamespace != nil { + s = append(s, "CsharpNamespace: "+valueToGoStringDescriptor(this.CsharpNamespace, "string")+",\n") + } + if this.SwiftPrefix != nil { + s = append(s, "SwiftPrefix: "+valueToGoStringDescriptor(this.SwiftPrefix, "string")+",\n") + } + if 
this.PhpClassPrefix != nil { + s = append(s, "PhpClassPrefix: "+valueToGoStringDescriptor(this.PhpClassPrefix, "string")+",\n") + } + if this.PhpNamespace != nil { + s = append(s, "PhpNamespace: "+valueToGoStringDescriptor(this.PhpNamespace, "string")+",\n") + } + if this.PhpMetadataNamespace != nil { + s = append(s, "PhpMetadataNamespace: "+valueToGoStringDescriptor(this.PhpMetadataNamespace, "string")+",\n") + } + if this.RubyPackage != nil { + s = append(s, "RubyPackage: "+valueToGoStringDescriptor(this.RubyPackage, "string")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *MessageOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&descriptor.MessageOptions{") + if this.MessageSetWireFormat != nil { + s = append(s, "MessageSetWireFormat: "+valueToGoStringDescriptor(this.MessageSetWireFormat, "bool")+",\n") + } + if this.NoStandardDescriptorAccessor != nil { + s = append(s, "NoStandardDescriptorAccessor: "+valueToGoStringDescriptor(this.NoStandardDescriptorAccessor, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.MapEntry != nil { + s = append(s, "MapEntry: "+valueToGoStringDescriptor(this.MapEntry, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FieldOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 11) + s = append(s, "&descriptor.FieldOptions{") + if this.Ctype != nil { + s = append(s, "Ctype: "+valueToGoStringDescriptor(this.Ctype, "FieldOptions_CType")+",\n") + } + if this.Packed != nil { + s = append(s, "Packed: "+valueToGoStringDescriptor(this.Packed, "bool")+",\n") + } + if this.Jstype != nil { + s = append(s, "Jstype: "+valueToGoStringDescriptor(this.Jstype, "FieldOptions_JSType")+",\n") + } + if this.Lazy != nil { + s = append(s, "Lazy: "+valueToGoStringDescriptor(this.Lazy, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.Weak != nil { + s = append(s, "Weak: "+valueToGoStringDescriptor(this.Weak, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *OneofOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.OneofOptions{") + if this.UninterpretedOption != nil { + s = append(s, 
"UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.EnumOptions{") + if this.AllowAlias != nil { + s = append(s, "AllowAlias: "+valueToGoStringDescriptor(this.AllowAlias, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumValueOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.EnumValueOptions{") + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ServiceOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.ServiceOptions{") + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *MethodOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.MethodOptions{") + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.IdempotencyLevel != nil { + s = append(s, "IdempotencyLevel: "+valueToGoStringDescriptor(this.IdempotencyLevel, "MethodOptions_IdempotencyLevel")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *UninterpretedOption) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 11) + s = append(s, "&descriptor.UninterpretedOption{") + if this.Name 
!= nil { + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + } + if this.IdentifierValue != nil { + s = append(s, "IdentifierValue: "+valueToGoStringDescriptor(this.IdentifierValue, "string")+",\n") + } + if this.PositiveIntValue != nil { + s = append(s, "PositiveIntValue: "+valueToGoStringDescriptor(this.PositiveIntValue, "uint64")+",\n") + } + if this.NegativeIntValue != nil { + s = append(s, "NegativeIntValue: "+valueToGoStringDescriptor(this.NegativeIntValue, "int64")+",\n") + } + if this.DoubleValue != nil { + s = append(s, "DoubleValue: "+valueToGoStringDescriptor(this.DoubleValue, "float64")+",\n") + } + if this.StringValue != nil { + s = append(s, "StringValue: "+valueToGoStringDescriptor(this.StringValue, "byte")+",\n") + } + if this.AggregateValue != nil { + s = append(s, "AggregateValue: "+valueToGoStringDescriptor(this.AggregateValue, "string")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *UninterpretedOption_NamePart) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.UninterpretedOption_NamePart{") + if this.NamePart != nil { + s = append(s, "NamePart: "+valueToGoStringDescriptor(this.NamePart, "string")+",\n") + } + if this.IsExtension != nil { + s = append(s, "IsExtension: "+valueToGoStringDescriptor(this.IsExtension, "bool")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *SourceCodeInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.SourceCodeInfo{") + if this.Location != nil { + s = append(s, "Location: "+fmt.Sprintf("%#v", this.Location)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *SourceCodeInfo_Location) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&descriptor.SourceCodeInfo_Location{") + if this.Path != nil { + s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n") + } + if this.Span != nil { + s = append(s, "Span: "+fmt.Sprintf("%#v", this.Span)+",\n") + } + if this.LeadingComments != nil { + s = append(s, "LeadingComments: "+valueToGoStringDescriptor(this.LeadingComments, "string")+",\n") + } + if this.TrailingComments != nil { + s = append(s, "TrailingComments: "+valueToGoStringDescriptor(this.TrailingComments, "string")+",\n") + } + if this.LeadingDetachedComments != nil { + s = append(s, "LeadingDetachedComments: "+fmt.Sprintf("%#v", this.LeadingDetachedComments)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *GeneratedCodeInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.GeneratedCodeInfo{") + if this.Annotation != nil { + s = append(s, "Annotation: "+fmt.Sprintf("%#v", this.Annotation)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func 
(this *GeneratedCodeInfo_Annotation) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&descriptor.GeneratedCodeInfo_Annotation{") + if this.Path != nil { + s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n") + } + if this.SourceFile != nil { + s = append(s, "SourceFile: "+valueToGoStringDescriptor(this.SourceFile, "string")+",\n") + } + if this.Begin != nil { + s = append(s, "Begin: "+valueToGoStringDescriptor(this.Begin, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringDescriptor(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func extensionToGoStringDescriptor(m github_com_gogo_protobuf_proto.Message) string { + e := github_com_gogo_protobuf_proto.GetUnsafeExtensionsMap(m) + if e == nil { + return "nil" + } + s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{" + keys := make([]int, 0, len(e)) + for k := range e { + keys = append(keys, int(k)) + } + sort.Ints(keys) + ss := []string{} + for _, k := range keys { + ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString()) + } + s += strings.Join(ss, ",") + "})" + return s +} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go new file mode 100644 index 000000000..e0846a357 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go @@ -0,0 +1,390 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package descriptor + +import ( + "strings" +) + +func (msg *DescriptorProto) GetMapFields() (*FieldDescriptorProto, *FieldDescriptorProto) { + if !msg.GetOptions().GetMapEntry() { + return nil, nil + } + return msg.GetField()[0], msg.GetField()[1] +} + +func dotToUnderscore(r rune) rune { + if r == '.' { + return '_' + } + return r +} + +func (field *FieldDescriptorProto) WireType() (wire int) { + switch *field.Type { + case FieldDescriptorProto_TYPE_DOUBLE: + return 1 + case FieldDescriptorProto_TYPE_FLOAT: + return 5 + case FieldDescriptorProto_TYPE_INT64: + return 0 + case FieldDescriptorProto_TYPE_UINT64: + return 0 + case FieldDescriptorProto_TYPE_INT32: + return 0 + case FieldDescriptorProto_TYPE_UINT32: + return 0 + case FieldDescriptorProto_TYPE_FIXED64: + return 1 + case FieldDescriptorProto_TYPE_FIXED32: + return 5 + case FieldDescriptorProto_TYPE_BOOL: + return 0 + case FieldDescriptorProto_TYPE_STRING: + return 2 + case FieldDescriptorProto_TYPE_GROUP: + return 2 + case FieldDescriptorProto_TYPE_MESSAGE: + return 2 + case FieldDescriptorProto_TYPE_BYTES: + return 2 + case FieldDescriptorProto_TYPE_ENUM: + return 0 + case FieldDescriptorProto_TYPE_SFIXED32: + return 5 + case FieldDescriptorProto_TYPE_SFIXED64: + return 1 + case FieldDescriptorProto_TYPE_SINT32: + return 0 + case FieldDescriptorProto_TYPE_SINT64: + return 0 + } + panic("unreachable") +} + +func (field *FieldDescriptorProto) GetKeyUint64() (x uint64) { + packed := field.IsPacked() + wireType := field.WireType() + fieldNumber := field.GetNumber() + if packed { + wireType = 2 + } + x = uint64(uint32(fieldNumber)<<3 | uint32(wireType)) + return x +} + +func (field *FieldDescriptorProto) GetKey3Uint64() (x uint64) { + packed := field.IsPacked3() + wireType := field.WireType() + fieldNumber := field.GetNumber() + if packed { + wireType = 2 + } + x = uint64(uint32(fieldNumber)<<3 | uint32(wireType)) + return x +} + +func (field *FieldDescriptorProto) GetKey() []byte { + x := field.GetKeyUint64() + i := 0 + keybuf := make([]byte, 0) + for i = 0; x > 127; i++ { + keybuf = append(keybuf, 0x80|uint8(x&0x7F)) + x >>= 7 + } + keybuf = append(keybuf, uint8(x)) + return keybuf +} + +func (field *FieldDescriptorProto) GetKey3() []byte { + x := field.GetKey3Uint64() + i := 0 + keybuf := make([]byte, 0) + for i = 0; x > 127; i++ { + keybuf = append(keybuf, 0x80|uint8(x&0x7F)) + x >>= 7 + } + keybuf = append(keybuf, uint8(x)) + return keybuf +} + +func (desc *FileDescriptorSet) GetField(packageName, messageName, fieldName string) *FieldDescriptorProto { + msg := desc.GetMessage(packageName, messageName) + if msg == nil { + return nil + } + for _, field := range msg.GetField() { + if field.GetName() == fieldName { + return field + } + } + return nil +} + +func (file *FileDescriptorProto) GetMessage(typeName string) *DescriptorProto { + for _, msg := range file.GetMessageType() { + if msg.GetName() == typeName { + return msg + } + nes := file.GetNestedMessage(msg, strings.TrimPrefix(typeName, msg.GetName()+".")) + if nes != nil { + return nes + } + } + return nil +} + +func (file *FileDescriptorProto) GetNestedMessage(msg *DescriptorProto, typeName string) *DescriptorProto { + for _, nes := range msg.GetNestedType() { + if nes.GetName() == typeName { + return nes + } + res := file.GetNestedMessage(nes, strings.TrimPrefix(typeName, nes.GetName()+".")) + if res != nil { + return res + } + } + return nil +} + +func (desc *FileDescriptorSet) GetMessage(packageName string, typeName string) *DescriptorProto { + for _, file := range 
desc.GetFile() { + if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) { + continue + } + for _, msg := range file.GetMessageType() { + if msg.GetName() == typeName { + return msg + } + } + for _, msg := range file.GetMessageType() { + for _, nes := range msg.GetNestedType() { + if nes.GetName() == typeName { + return nes + } + if msg.GetName()+"."+nes.GetName() == typeName { + return nes + } + } + } + } + return nil +} + +func (desc *FileDescriptorSet) IsProto3(packageName string, typeName string) bool { + for _, file := range desc.GetFile() { + if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) { + continue + } + for _, msg := range file.GetMessageType() { + if msg.GetName() == typeName { + return file.GetSyntax() == "proto3" + } + } + for _, msg := range file.GetMessageType() { + for _, nes := range msg.GetNestedType() { + if nes.GetName() == typeName { + return file.GetSyntax() == "proto3" + } + if msg.GetName()+"."+nes.GetName() == typeName { + return file.GetSyntax() == "proto3" + } + } + } + } + return false +} + +func (msg *DescriptorProto) IsExtendable() bool { + return len(msg.GetExtensionRange()) > 0 +} + +func (desc *FileDescriptorSet) FindExtension(packageName string, typeName string, fieldName string) (extPackageName string, field *FieldDescriptorProto) { + parent := desc.GetMessage(packageName, typeName) + if parent == nil { + return "", nil + } + if !parent.IsExtendable() { + return "", nil + } + extendee := "." + packageName + "." + typeName + for _, file := range desc.GetFile() { + for _, ext := range file.GetExtension() { + if strings.Map(dotToUnderscore, file.GetPackage()) == strings.Map(dotToUnderscore, packageName) { + if !(ext.GetExtendee() == typeName || ext.GetExtendee() == extendee) { + continue + } + } else { + if ext.GetExtendee() != extendee { + continue + } + } + if ext.GetName() == fieldName { + return file.GetPackage(), ext + } + } + } + return "", nil +} + +func (desc *FileDescriptorSet) FindExtensionByFieldNumber(packageName string, typeName string, fieldNum int32) (extPackageName string, field *FieldDescriptorProto) { + parent := desc.GetMessage(packageName, typeName) + if parent == nil { + return "", nil + } + if !parent.IsExtendable() { + return "", nil + } + extendee := "." + packageName + "." 
+ typeName + for _, file := range desc.GetFile() { + for _, ext := range file.GetExtension() { + if strings.Map(dotToUnderscore, file.GetPackage()) == strings.Map(dotToUnderscore, packageName) { + if !(ext.GetExtendee() == typeName || ext.GetExtendee() == extendee) { + continue + } + } else { + if ext.GetExtendee() != extendee { + continue + } + } + if ext.GetNumber() == fieldNum { + return file.GetPackage(), ext + } + } + } + return "", nil +} + +func (desc *FileDescriptorSet) FindMessage(packageName string, typeName string, fieldName string) (msgPackageName string, msgName string) { + parent := desc.GetMessage(packageName, typeName) + if parent == nil { + return "", "" + } + field := parent.GetFieldDescriptor(fieldName) + if field == nil { + var extPackageName string + extPackageName, field = desc.FindExtension(packageName, typeName, fieldName) + if field == nil { + return "", "" + } + packageName = extPackageName + } + typeNames := strings.Split(field.GetTypeName(), ".") + if len(typeNames) == 1 { + msg := desc.GetMessage(packageName, typeName) + if msg == nil { + return "", "" + } + return packageName, msg.GetName() + } + if len(typeNames) > 2 { + for i := 1; i < len(typeNames)-1; i++ { + packageName = strings.Join(typeNames[1:len(typeNames)-i], ".") + typeName = strings.Join(typeNames[len(typeNames)-i:], ".") + msg := desc.GetMessage(packageName, typeName) + if msg != nil { + typeNames := strings.Split(msg.GetName(), ".") + if len(typeNames) == 1 { + return packageName, msg.GetName() + } + return strings.Join(typeNames[1:len(typeNames)-1], "."), typeNames[len(typeNames)-1] + } + } + } + return "", "" +} + +func (msg *DescriptorProto) GetFieldDescriptor(fieldName string) *FieldDescriptorProto { + for _, field := range msg.GetField() { + if field.GetName() == fieldName { + return field + } + } + return nil +} + +func (desc *FileDescriptorSet) GetEnum(packageName string, typeName string) *EnumDescriptorProto { + for _, file := range desc.GetFile() { + if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) { + continue + } + for _, enum := range file.GetEnumType() { + if enum.GetName() == typeName { + return enum + } + } + } + return nil +} + +func (f *FieldDescriptorProto) IsEnum() bool { + return *f.Type == FieldDescriptorProto_TYPE_ENUM +} + +func (f *FieldDescriptorProto) IsMessage() bool { + return *f.Type == FieldDescriptorProto_TYPE_MESSAGE +} + +func (f *FieldDescriptorProto) IsBytes() bool { + return *f.Type == FieldDescriptorProto_TYPE_BYTES +} + +func (f *FieldDescriptorProto) IsRepeated() bool { + return f.Label != nil && *f.Label == FieldDescriptorProto_LABEL_REPEATED +} + +func (f *FieldDescriptorProto) IsString() bool { + return *f.Type == FieldDescriptorProto_TYPE_STRING +} + +func (f *FieldDescriptorProto) IsBool() bool { + return *f.Type == FieldDescriptorProto_TYPE_BOOL +} + +func (f *FieldDescriptorProto) IsRequired() bool { + return f.Label != nil && *f.Label == FieldDescriptorProto_LABEL_REQUIRED +} + +func (f *FieldDescriptorProto) IsPacked() bool { + return f.Options != nil && f.GetOptions().GetPacked() +} + +func (f *FieldDescriptorProto) IsPacked3() bool { + if f.IsRepeated() && f.IsScalar() { + if f.Options == nil || f.GetOptions().Packed == nil { + return true + } + return f.Options != nil && f.GetOptions().GetPacked() + } + return false +} + +func (m *DescriptorProto) HasExtension() bool { + return len(m.ExtensionRange) > 0 +} diff --git a/vendor/github.com/gogo/protobuf/types/any.go 
b/vendor/github.com/gogo/protobuf/types/any.go new file mode 100644 index 000000000..df4787de3 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/any.go @@ -0,0 +1,140 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package types + +// This file implements functions to marshal proto.Message to/from +// google.protobuf.Any message. + +import ( + "fmt" + "reflect" + "strings" + + "github.com/gogo/protobuf/proto" +) + +const googleApis = "type.googleapis.com/" + +// AnyMessageName returns the name of the message contained in a google.protobuf.Any message. +// +// Note that regular type assertions should be done using the Is +// function. AnyMessageName is provided for less common use cases like filtering a +// sequence of Any messages based on a set of allowed message type names. +func AnyMessageName(any *Any) (string, error) { + if any == nil { + return "", fmt.Errorf("message is nil") + } + slash := strings.LastIndex(any.TypeUrl, "/") + if slash < 0 { + return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl) + } + return any.TypeUrl[slash+1:], nil +} + +// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any. +func MarshalAny(pb proto.Message) (*Any, error) { + value, err := proto.Marshal(pb) + if err != nil { + return nil, err + } + return &Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil +} + +// DynamicAny is a value that can be passed to UnmarshalAny to automatically +// allocate a proto.Message for the type specified in a google.protobuf.Any +// message. The allocated message is stored in the embedded proto.Message. +// +// Example: +// +// var x ptypes.DynamicAny +// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... 
} +// fmt.Printf("unmarshaled message: %v", x.Message) +type DynamicAny struct { + proto.Message +} + +// Empty returns a new proto.Message of the type specified in a +// google.protobuf.Any message. It returns an error if corresponding message +// type isn't linked in. +func EmptyAny(any *Any) (proto.Message, error) { + aname, err := AnyMessageName(any) + if err != nil { + return nil, err + } + + t := proto.MessageType(aname) + if t == nil { + return nil, fmt.Errorf("any: message type %q isn't linked in", aname) + } + return reflect.New(t.Elem()).Interface().(proto.Message), nil +} + +// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any +// message and places the decoded result in pb. It returns an error if type of +// contents of Any message does not match type of pb message. +// +// pb can be a proto.Message, or a *DynamicAny. +func UnmarshalAny(any *Any, pb proto.Message) error { + if d, ok := pb.(*DynamicAny); ok { + if d.Message == nil { + var err error + d.Message, err = EmptyAny(any) + if err != nil { + return err + } + } + return UnmarshalAny(any, d.Message) + } + + aname, err := AnyMessageName(any) + if err != nil { + return err + } + + mname := proto.MessageName(pb) + if aname != mname { + return fmt.Errorf("mismatched message type: got %q want %q", aname, mname) + } + return proto.Unmarshal(any.Value, pb) +} + +// Is returns true if any value contains a given message type. +func Is(any *Any, pb proto.Message) bool { + // The following is equivalent to AnyMessageName(any) == proto.MessageName(pb), + // but it avoids scanning TypeUrl for the slash. + if any == nil { + return false + } + name := proto.MessageName(pb) + prefix := len(any.TypeUrl) - len(name) + return prefix >= 1 && any.TypeUrl[prefix-1] == '/' && any.TypeUrl[prefix:] == name +} diff --git a/vendor/github.com/gogo/protobuf/types/any.pb.go b/vendor/github.com/gogo/protobuf/types/any.pb.go new file mode 100644 index 000000000..98e269d54 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/any.pb.go @@ -0,0 +1,697 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: google/protobuf/any.proto + +package types + +import ( + bytes "bytes" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// `Any` contains an arbitrary serialized protocol buffer message along with a +// URL that describes the type of the serialized message. +// +// Protobuf library provides support to pack/unpack Any values in the form +// of utility functions or additional generated methods of the Any type. +// +// Example 1: Pack and unpack a message in C++. +// +// Foo foo = ...; +// Any any; +// any.PackFrom(foo); +// ... +// if (any.UnpackTo(&foo)) { +// ... +// } +// +// Example 2: Pack and unpack a message in Java. +// +// Foo foo = ...; +// Any any = Any.pack(foo); +// ... +// if (any.is(Foo.class)) { +// foo = any.unpack(Foo.class); +// } +// +// Example 3: Pack and unpack a message in Python. +// +// foo = Foo(...) 
+// any = Any() +// any.Pack(foo) +// ... +// if any.Is(Foo.DESCRIPTOR): +// any.Unpack(foo) +// ... +// +// Example 4: Pack and unpack a message in Go +// +// foo := &pb.Foo{...} +// any, err := ptypes.MarshalAny(foo) +// ... +// foo := &pb.Foo{} +// if err := ptypes.UnmarshalAny(any, foo); err != nil { +// ... +// } +// +// The pack methods provided by protobuf library will by default use +// 'type.googleapis.com/full.type.name' as the type URL and the unpack +// methods only use the fully qualified type name after the last '/' +// in the type URL, for example "foo.bar.com/x/y.z" will yield type +// name "y.z". +// +// +// JSON +// ==== +// The JSON representation of an `Any` value uses the regular +// representation of the deserialized, embedded message, with an +// additional field `@type` which contains the type URL. Example: +// +// package google.profile; +// message Person { +// string first_name = 1; +// string last_name = 2; +// } +// +// { +// "@type": "type.googleapis.com/google.profile.Person", +// "firstName": , +// "lastName": +// } +// +// If the embedded message type is well-known and has a custom JSON +// representation, that representation will be embedded adding a field +// `value` which holds the custom JSON in addition to the `@type` +// field. Example (for message [google.protobuf.Duration][]): +// +// { +// "@type": "type.googleapis.com/google.protobuf.Duration", +// "value": "1.212s" +// } +// +type Any struct { + // A URL/resource name that uniquely identifies the type of the serialized + // protocol buffer message. This string must contain at least + // one "/" character. The last segment of the URL's path must represent + // the fully qualified name of the type (as in + // `path/google.protobuf.Duration`). The name should be in a canonical form + // (e.g., leading "." is not accepted). + // + // In practice, teams usually precompile into the binary all types that they + // expect it to use in the context of Any. However, for URLs which use the + // scheme `http`, `https`, or no scheme, one can optionally set up a type + // server that maps type URLs to message definitions as follows: + // + // * If no scheme is provided, `https` is assumed. + // * An HTTP GET on the URL must yield a [google.protobuf.Type][] + // value in binary format, or produce an error. + // * Applications are allowed to cache lookup results based on the + // URL, or have them precompiled into a binary to avoid any + // lookup. Therefore, binary compatibility needs to be preserved + // on changes to types. (Use versioned type names to manage + // breaking changes.) + // + // Note: this functionality is not currently available in the official + // protobuf release, and it is not used for type URLs beginning with + // type.googleapis.com. + // + // Schemes other than `http`, `https` (or the empty scheme) might be + // used with implementation specific semantics. + // + TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` + // Must be a valid serialized protocol buffer of the above specified type. 
+ Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Any) Reset() { *m = Any{} } +func (*Any) ProtoMessage() {} +func (*Any) Descriptor() ([]byte, []int) { + return fileDescriptor_b53526c13ae22eb4, []int{0} +} +func (*Any) XXX_WellKnownType() string { return "Any" } +func (m *Any) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Any) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Any.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Any) XXX_Merge(src proto.Message) { + xxx_messageInfo_Any.Merge(m, src) +} +func (m *Any) XXX_Size() int { + return m.Size() +} +func (m *Any) XXX_DiscardUnknown() { + xxx_messageInfo_Any.DiscardUnknown(m) +} + +var xxx_messageInfo_Any proto.InternalMessageInfo + +func (m *Any) GetTypeUrl() string { + if m != nil { + return m.TypeUrl + } + return "" +} + +func (m *Any) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (*Any) XXX_MessageName() string { + return "google.protobuf.Any" +} +func init() { + proto.RegisterType((*Any)(nil), "google.protobuf.Any") +} + +func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_b53526c13ae22eb4) } + +var fileDescriptor_b53526c13ae22eb4 = []byte{ + // 211 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4, + 0x03, 0x73, 0x84, 0xf8, 0x21, 0x52, 0x7a, 0x30, 0x29, 0x25, 0x33, 0x2e, 0x66, 0xc7, 0xbc, 0x4a, + 0x21, 0x49, 0x2e, 0x8e, 0x92, 0xca, 0x82, 0xd4, 0xf8, 0xd2, 0xa2, 0x1c, 0x09, 0x46, 0x05, 0x46, + 0x0d, 0xce, 0x20, 0x76, 0x10, 0x3f, 0xb4, 0x28, 0x47, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7, + 0x34, 0x55, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xc2, 0x71, 0xaa, 0xbf, 0xf1, 0x50, 0x8e, + 0xe1, 0xc3, 0x43, 0x39, 0xc6, 0x1f, 0x0f, 0xe5, 0x18, 0x1b, 0x1e, 0xc9, 0x31, 0xae, 0x78, 0x24, + 0xc7, 0x78, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0xbe, 0x78, + 0x24, 0xc7, 0xf0, 0x01, 0x24, 0xfe, 0x58, 0x8e, 0xf1, 0xc4, 0x63, 0x39, 0x46, 0x2e, 0xe1, 0xe4, + 0xfc, 0x5c, 0x3d, 0x34, 0xeb, 0x9d, 0x38, 0x1c, 0xf3, 0x2a, 0x03, 0x40, 0x9c, 0x00, 0xc6, 0x28, + 0x56, 0x90, 0x8d, 0xc5, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c, 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x94, + 0x06, 0x40, 0x95, 0xea, 0x85, 0xa7, 0xe6, 0xe4, 0x78, 0xe7, 0xe5, 0x97, 0xe7, 0x85, 0x80, 0x94, + 0x25, 0xb1, 0x81, 0xcd, 0x30, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xb7, 0x81, 0x82, 0xd3, 0xed, + 0x00, 0x00, 0x00, +} + +func (this *Any) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Any) + if !ok { + that2, ok := that.(Any) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.TypeUrl != that1.TypeUrl { + if this.TypeUrl < that1.TypeUrl { + return -1 + } + return 1 + } + if c := bytes.Compare(this.Value, that1.Value); c != 0 { + return c + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Any) Equal(that 
interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Any) + if !ok { + that2, ok := that.(Any) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.TypeUrl != that1.TypeUrl { + return false + } + if !bytes.Equal(this.Value, that1.Value) { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Any) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&types.Any{") + s = append(s, "TypeUrl: "+fmt.Sprintf("%#v", this.TypeUrl)+",\n") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringAny(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Any) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Any) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Any) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintAny(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + } + if len(m.TypeUrl) > 0 { + i -= len(m.TypeUrl) + copy(dAtA[i:], m.TypeUrl) + i = encodeVarintAny(dAtA, i, uint64(len(m.TypeUrl))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintAny(dAtA []byte, offset int, v uint64) int { + offset -= sovAny(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func NewPopulatedAny(r randyAny, easy bool) *Any { + this := &Any{} + this.TypeUrl = string(randStringAny(r)) + v1 := r.Intn(100) + this.Value = make([]byte, v1) + for i := 0; i < v1; i++ { + this.Value[i] = byte(r.Intn(256)) + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedAny(r, 3) + } + return this +} + +type randyAny interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneAny(r randyAny) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringAny(r randyAny) string { + v2 := r.Intn(100) + tmps := make([]rune, v2) + for i := 0; i < v2; i++ { + tmps[i] = randUTF8RuneAny(r) + } + return string(tmps) +} +func randUnrecognizedAny(r randyAny, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldAny(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldAny(dAtA []byte, r randyAny, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + 
switch wire { + case 0: + dAtA = encodeVarintPopulateAny(dAtA, uint64(key)) + v3 := r.Int63() + if r.Intn(2) == 0 { + v3 *= -1 + } + dAtA = encodeVarintPopulateAny(dAtA, uint64(v3)) + case 1: + dAtA = encodeVarintPopulateAny(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateAny(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateAny(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateAny(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateAny(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *Any) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TypeUrl) + if l > 0 { + n += 1 + l + sovAny(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovAny(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovAny(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozAny(x uint64) (n int) { + return sovAny(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Any) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Any{`, + `TypeUrl:` + fmt.Sprintf("%v", this.TypeUrl) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func valueToStringAny(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Any) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAny + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Any: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Any: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TypeUrl", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAny + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAny + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthAny + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TypeUrl = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAny + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthAny + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthAny + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) + if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAny(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAny + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthAny + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipAny(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAny + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAny + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAny + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthAny + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupAny + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthAny + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthAny = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowAny = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupAny = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/gogo/protobuf/types/api.pb.go b/vendor/github.com/gogo/protobuf/types/api.pb.go new file mode 100644 index 000000000..58bf4b53b --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/api.pb.go @@ -0,0 +1,2143 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: google/protobuf/api.proto + +package types + +import ( + bytes "bytes" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Api is a light-weight descriptor for an API Interface. 
+// +// Interfaces are also described as "protocol buffer services" in some contexts, +// such as by the "service" keyword in a .proto file, but they are different +// from API Services, which represent a concrete implementation of an interface +// as opposed to simply a description of methods and bindings. They are also +// sometimes simply referred to as "APIs" in other contexts, such as the name of +// this message itself. See https://cloud.google.com/apis/design/glossary for +// detailed terminology. +type Api struct { + // The fully qualified name of this interface, including package name + // followed by the interface's simple name. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The methods of this interface, in unspecified order. + Methods []*Method `protobuf:"bytes,2,rep,name=methods,proto3" json:"methods,omitempty"` + // Any metadata attached to the interface. + Options []*Option `protobuf:"bytes,3,rep,name=options,proto3" json:"options,omitempty"` + // A version string for this interface. If specified, must have the form + // `major-version.minor-version`, as in `1.10`. If the minor version is + // omitted, it defaults to zero. If the entire version field is empty, the + // major version is derived from the package name, as outlined below. If the + // field is not empty, the version in the package name will be verified to be + // consistent with what is provided here. + // + // The versioning schema uses [semantic + // versioning](http://semver.org) where the major version number + // indicates a breaking change and the minor version an additive, + // non-breaking change. Both version numbers are signals to users + // what to expect from different versions, and should be carefully + // chosen based on the product plan. + // + // The major version is also reflected in the package name of the + // interface, which must end in `v`, as in + // `google.feature.v1`. For major versions 0 and 1, the suffix can + // be omitted. Zero major versions must only be used for + // experimental, non-GA interfaces. + // + // + Version string `protobuf:"bytes,4,opt,name=version,proto3" json:"version,omitempty"` + // Source context for the protocol buffer service represented by this + // message. + SourceContext *SourceContext `protobuf:"bytes,5,opt,name=source_context,json=sourceContext,proto3" json:"source_context,omitempty"` + // Included interfaces. See [Mixin][]. + Mixins []*Mixin `protobuf:"bytes,6,rep,name=mixins,proto3" json:"mixins,omitempty"` + // The source syntax of the service. 
+ Syntax Syntax `protobuf:"varint,7,opt,name=syntax,proto3,enum=google.protobuf.Syntax" json:"syntax,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Api) Reset() { *m = Api{} } +func (*Api) ProtoMessage() {} +func (*Api) Descriptor() ([]byte, []int) { + return fileDescriptor_a2ec32096296c143, []int{0} +} +func (m *Api) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Api) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Api.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Api) XXX_Merge(src proto.Message) { + xxx_messageInfo_Api.Merge(m, src) +} +func (m *Api) XXX_Size() int { + return m.Size() +} +func (m *Api) XXX_DiscardUnknown() { + xxx_messageInfo_Api.DiscardUnknown(m) +} + +var xxx_messageInfo_Api proto.InternalMessageInfo + +func (m *Api) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Api) GetMethods() []*Method { + if m != nil { + return m.Methods + } + return nil +} + +func (m *Api) GetOptions() []*Option { + if m != nil { + return m.Options + } + return nil +} + +func (m *Api) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func (m *Api) GetSourceContext() *SourceContext { + if m != nil { + return m.SourceContext + } + return nil +} + +func (m *Api) GetMixins() []*Mixin { + if m != nil { + return m.Mixins + } + return nil +} + +func (m *Api) GetSyntax() Syntax { + if m != nil { + return m.Syntax + } + return Syntax_SYNTAX_PROTO2 +} + +func (*Api) XXX_MessageName() string { + return "google.protobuf.Api" +} + +// Method represents a method of an API interface. +type Method struct { + // The simple name of this method. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // A URL of the input message type. + RequestTypeUrl string `protobuf:"bytes,2,opt,name=request_type_url,json=requestTypeUrl,proto3" json:"request_type_url,omitempty"` + // If true, the request is streamed. + RequestStreaming bool `protobuf:"varint,3,opt,name=request_streaming,json=requestStreaming,proto3" json:"request_streaming,omitempty"` + // The URL of the output message type. + ResponseTypeUrl string `protobuf:"bytes,4,opt,name=response_type_url,json=responseTypeUrl,proto3" json:"response_type_url,omitempty"` + // If true, the response is streamed. + ResponseStreaming bool `protobuf:"varint,5,opt,name=response_streaming,json=responseStreaming,proto3" json:"response_streaming,omitempty"` + // Any metadata attached to the method. + Options []*Option `protobuf:"bytes,6,rep,name=options,proto3" json:"options,omitempty"` + // The source syntax of this method. 
+ Syntax Syntax `protobuf:"varint,7,opt,name=syntax,proto3,enum=google.protobuf.Syntax" json:"syntax,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Method) Reset() { *m = Method{} } +func (*Method) ProtoMessage() {} +func (*Method) Descriptor() ([]byte, []int) { + return fileDescriptor_a2ec32096296c143, []int{1} +} +func (m *Method) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Method) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Method.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Method) XXX_Merge(src proto.Message) { + xxx_messageInfo_Method.Merge(m, src) +} +func (m *Method) XXX_Size() int { + return m.Size() +} +func (m *Method) XXX_DiscardUnknown() { + xxx_messageInfo_Method.DiscardUnknown(m) +} + +var xxx_messageInfo_Method proto.InternalMessageInfo + +func (m *Method) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Method) GetRequestTypeUrl() string { + if m != nil { + return m.RequestTypeUrl + } + return "" +} + +func (m *Method) GetRequestStreaming() bool { + if m != nil { + return m.RequestStreaming + } + return false +} + +func (m *Method) GetResponseTypeUrl() string { + if m != nil { + return m.ResponseTypeUrl + } + return "" +} + +func (m *Method) GetResponseStreaming() bool { + if m != nil { + return m.ResponseStreaming + } + return false +} + +func (m *Method) GetOptions() []*Option { + if m != nil { + return m.Options + } + return nil +} + +func (m *Method) GetSyntax() Syntax { + if m != nil { + return m.Syntax + } + return Syntax_SYNTAX_PROTO2 +} + +func (*Method) XXX_MessageName() string { + return "google.protobuf.Method" +} + +// Declares an API Interface to be included in this interface. The including +// interface must redeclare all the methods from the included interface, but +// documentation and options are inherited as follows: +// +// - If after comment and whitespace stripping, the documentation +// string of the redeclared method is empty, it will be inherited +// from the original method. +// +// - Each annotation belonging to the service config (http, +// visibility) which is not set in the redeclared method will be +// inherited. +// +// - If an http annotation is inherited, the path pattern will be +// modified as follows. Any version prefix will be replaced by the +// version of the including interface plus the [root][] path if +// specified. +// +// Example of a simple mixin: +// +// package google.acl.v1; +// service AccessControl { +// // Get the underlying ACL object. +// rpc GetAcl(GetAclRequest) returns (Acl) { +// option (google.api.http).get = "/v1/{resource=**}:getAcl"; +// } +// } +// +// package google.storage.v2; +// service Storage { +// rpc GetAcl(GetAclRequest) returns (Acl); +// +// // Get a data record. +// rpc GetData(GetDataRequest) returns (Data) { +// option (google.api.http).get = "/v2/{resource=**}"; +// } +// } +// +// Example of a mixin configuration: +// +// apis: +// - name: google.storage.v2.Storage +// mixins: +// - name: google.acl.v1.AccessControl +// +// The mixin construct implies that all methods in `AccessControl` are +// also declared with same name and request/response types in +// `Storage`. 
A documentation generator or annotation processor will +// see the effective `Storage.GetAcl` method after inherting +// documentation and annotations as follows: +// +// service Storage { +// // Get the underlying ACL object. +// rpc GetAcl(GetAclRequest) returns (Acl) { +// option (google.api.http).get = "/v2/{resource=**}:getAcl"; +// } +// ... +// } +// +// Note how the version in the path pattern changed from `v1` to `v2`. +// +// If the `root` field in the mixin is specified, it should be a +// relative path under which inherited HTTP paths are placed. Example: +// +// apis: +// - name: google.storage.v2.Storage +// mixins: +// - name: google.acl.v1.AccessControl +// root: acls +// +// This implies the following inherited HTTP annotation: +// +// service Storage { +// // Get the underlying ACL object. +// rpc GetAcl(GetAclRequest) returns (Acl) { +// option (google.api.http).get = "/v2/acls/{resource=**}:getAcl"; +// } +// ... +// } +type Mixin struct { + // The fully qualified name of the interface which is included. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // If non-empty specifies a path under which inherited HTTP paths + // are rooted. + Root string `protobuf:"bytes,2,opt,name=root,proto3" json:"root,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Mixin) Reset() { *m = Mixin{} } +func (*Mixin) ProtoMessage() {} +func (*Mixin) Descriptor() ([]byte, []int) { + return fileDescriptor_a2ec32096296c143, []int{2} +} +func (m *Mixin) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Mixin) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Mixin.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Mixin) XXX_Merge(src proto.Message) { + xxx_messageInfo_Mixin.Merge(m, src) +} +func (m *Mixin) XXX_Size() int { + return m.Size() +} +func (m *Mixin) XXX_DiscardUnknown() { + xxx_messageInfo_Mixin.DiscardUnknown(m) +} + +var xxx_messageInfo_Mixin proto.InternalMessageInfo + +func (m *Mixin) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Mixin) GetRoot() string { + if m != nil { + return m.Root + } + return "" +} + +func (*Mixin) XXX_MessageName() string { + return "google.protobuf.Mixin" +} +func init() { + proto.RegisterType((*Api)(nil), "google.protobuf.Api") + proto.RegisterType((*Method)(nil), "google.protobuf.Method") + proto.RegisterType((*Mixin)(nil), "google.protobuf.Mixin") +} + +func init() { proto.RegisterFile("google/protobuf/api.proto", fileDescriptor_a2ec32096296c143) } + +var fileDescriptor_a2ec32096296c143 = []byte{ + // 467 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x91, 0x31, 0x6f, 0x13, 0x31, + 0x14, 0xc7, 0xeb, 0xbb, 0xe4, 0x52, 0x5c, 0x91, 0x82, 0x91, 0xc0, 0x64, 0xb0, 0x4e, 0x15, 0xc3, + 0x09, 0xc4, 0x45, 0x94, 0x4f, 0xd0, 0x20, 0xd4, 0x01, 0x21, 0xa2, 0x0b, 0x08, 0x89, 0x25, 0x4a, + 0x83, 0x09, 0x96, 0xee, 0x6c, 0x63, 0x3b, 0x90, 0x4c, 0xf0, 0x59, 0x98, 0x10, 0x23, 0xdf, 0x80, + 0xad, 0x23, 0x23, 0x23, 0xb9, 0x2e, 0x8c, 0x1d, 0x19, 0x91, 0x7d, 0xe7, 0xa6, 0x5c, 0x83, 0x04, + 0x9b, 0xdf, 0xfb, 0xff, 0xfc, 0xf7, 0x7b, 0x7f, 0xc3, 0x9b, 0x33, 0x21, 0x66, 0x39, 0xed, 0x4b, + 0x25, 0x8c, 0x38, 0x9a, 0xbf, 0xea, 0x4f, 0x24, 0x4b, 0x5d, 0x81, 0x76, 0x2b, 0x29, 
0xf5, 0x52, + 0xef, 0x56, 0x93, 0xd5, 0x62, 0xae, 0xa6, 0x74, 0x3c, 0x15, 0xdc, 0xd0, 0x85, 0xa9, 0xc0, 0x5e, + 0xaf, 0x49, 0x99, 0xa5, 0xac, 0x4d, 0xf6, 0xbe, 0x06, 0x30, 0x3c, 0x90, 0x0c, 0x21, 0xd8, 0xe2, + 0x93, 0x82, 0x62, 0x10, 0x83, 0xe4, 0x52, 0xe6, 0xce, 0xe8, 0x1e, 0xec, 0x14, 0xd4, 0xbc, 0x16, + 0x2f, 0x35, 0x0e, 0xe2, 0x30, 0xd9, 0xd9, 0xbf, 0x91, 0x36, 0x06, 0x48, 0x1f, 0x3b, 0x3d, 0xf3, + 0x9c, 0xbd, 0x22, 0xa4, 0x61, 0x82, 0x6b, 0x1c, 0xfe, 0xe5, 0xca, 0x13, 0xa7, 0x67, 0x9e, 0x43, + 0x18, 0x76, 0xde, 0x52, 0xa5, 0x99, 0xe0, 0xb8, 0xe5, 0x1e, 0xf7, 0x25, 0x7a, 0x08, 0xbb, 0x7f, + 0xee, 0x83, 0xdb, 0x31, 0x48, 0x76, 0xf6, 0xc9, 0x05, 0xcf, 0x91, 0xc3, 0x1e, 0x54, 0x54, 0x76, + 0x59, 0x9f, 0x2f, 0x51, 0x0a, 0xa3, 0x82, 0x2d, 0x18, 0xd7, 0x38, 0x72, 0x23, 0x5d, 0xbf, 0xb8, + 0x85, 0x95, 0xb3, 0x9a, 0x42, 0x7d, 0x18, 0xe9, 0x25, 0x37, 0x93, 0x05, 0xee, 0xc4, 0x20, 0xe9, + 0x6e, 0x58, 0x61, 0xe4, 0xe4, 0xac, 0xc6, 0xf6, 0xbe, 0x04, 0x30, 0xaa, 0x82, 0xd8, 0x18, 0x63, + 0x02, 0xaf, 0x28, 0xfa, 0x66, 0x4e, 0xb5, 0x19, 0xdb, 0xe0, 0xc7, 0x73, 0x95, 0xe3, 0xc0, 0xe9, + 0xdd, 0xba, 0xff, 0x74, 0x29, 0xe9, 0x33, 0x95, 0xa3, 0x3b, 0xf0, 0xaa, 0x27, 0xb5, 0x51, 0x74, + 0x52, 0x30, 0x3e, 0xc3, 0x61, 0x0c, 0x92, 0xed, 0xcc, 0x5b, 0x8c, 0x7c, 0x1f, 0xdd, 0xb6, 0xb0, + 0x96, 0x82, 0x6b, 0xba, 0xf6, 0xad, 0x12, 0xdc, 0xf5, 0x82, 0x37, 0xbe, 0x0b, 0xd1, 0x19, 0xbb, + 0x76, 0x6e, 0x3b, 0xe7, 0x33, 0x97, 0xb5, 0xf5, 0xb9, 0x5f, 0x8c, 0xfe, 0xf1, 0x17, 0xff, 0x3b, + 0xb4, 0x3e, 0x6c, 0xbb, 0xd8, 0x37, 0x46, 0x86, 0x60, 0x4b, 0x09, 0x61, 0xea, 0x98, 0xdc, 0x79, + 0xf0, 0xfe, 0xfb, 0x8a, 0x6c, 0x9d, 0xae, 0x08, 0xf8, 0xb5, 0x22, 0xe0, 0x43, 0x49, 0xc0, 0xa7, + 0x92, 0x80, 0xe3, 0x92, 0x80, 0x6f, 0x25, 0x01, 0x3f, 0x4a, 0x02, 0x7e, 0x96, 0x64, 0xeb, 0xd4, + 0xf6, 0x4f, 0x08, 0x38, 0x3e, 0x21, 0x00, 0x5e, 0x9b, 0x8a, 0xa2, 0x39, 0xc6, 0x60, 0xfb, 0x40, + 0xb2, 0xa1, 0x2d, 0x86, 0xe0, 0x45, 0xdb, 0xe6, 0xa6, 0x3f, 0x06, 0xe1, 0xe1, 0x70, 0xf0, 0x39, + 0x20, 0x87, 0x15, 0x3a, 0xf4, 0x13, 0x3f, 0xa7, 0x79, 0xfe, 0x88, 0x8b, 0x77, 0xdc, 0xc6, 0xa8, + 0x8f, 0x22, 0xe7, 0x71, 0xff, 0x77, 0x00, 0x00, 0x00, 0xff, 0xff, 0x2b, 0x64, 0x40, 0x40, 0xa1, + 0x03, 0x00, 0x00, +} + +func (this *Api) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Api) + if !ok { + that2, ok := that.(Api) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Name != that1.Name { + if this.Name < that1.Name { + return -1 + } + return 1 + } + if len(this.Methods) != len(that1.Methods) { + if len(this.Methods) < len(that1.Methods) { + return -1 + } + return 1 + } + for i := range this.Methods { + if c := this.Methods[i].Compare(that1.Methods[i]); c != 0 { + return c + } + } + if len(this.Options) != len(that1.Options) { + if len(this.Options) < len(that1.Options) { + return -1 + } + return 1 + } + for i := range this.Options { + if c := this.Options[i].Compare(that1.Options[i]); c != 0 { + return c + } + } + if this.Version != that1.Version { + if this.Version < that1.Version { + return -1 + } + return 1 + } + if c := this.SourceContext.Compare(that1.SourceContext); c != 0 { + return c + } + if len(this.Mixins) != len(that1.Mixins) { + if len(this.Mixins) < len(that1.Mixins) { + return -1 + } + return 1 + } + for i := range this.Mixins { + if c := this.Mixins[i].Compare(that1.Mixins[i]); c != 0 { + return c + } + } + if this.Syntax != 
that1.Syntax { + if this.Syntax < that1.Syntax { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Method) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Method) + if !ok { + that2, ok := that.(Method) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Name != that1.Name { + if this.Name < that1.Name { + return -1 + } + return 1 + } + if this.RequestTypeUrl != that1.RequestTypeUrl { + if this.RequestTypeUrl < that1.RequestTypeUrl { + return -1 + } + return 1 + } + if this.RequestStreaming != that1.RequestStreaming { + if !this.RequestStreaming { + return -1 + } + return 1 + } + if this.ResponseTypeUrl != that1.ResponseTypeUrl { + if this.ResponseTypeUrl < that1.ResponseTypeUrl { + return -1 + } + return 1 + } + if this.ResponseStreaming != that1.ResponseStreaming { + if !this.ResponseStreaming { + return -1 + } + return 1 + } + if len(this.Options) != len(that1.Options) { + if len(this.Options) < len(that1.Options) { + return -1 + } + return 1 + } + for i := range this.Options { + if c := this.Options[i].Compare(that1.Options[i]); c != 0 { + return c + } + } + if this.Syntax != that1.Syntax { + if this.Syntax < that1.Syntax { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Mixin) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Mixin) + if !ok { + that2, ok := that.(Mixin) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Name != that1.Name { + if this.Name < that1.Name { + return -1 + } + return 1 + } + if this.Root != that1.Root { + if this.Root < that1.Root { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Api) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Api) + if !ok { + that2, ok := that.(Api) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if len(this.Methods) != len(that1.Methods) { + return false + } + for i := range this.Methods { + if !this.Methods[i].Equal(that1.Methods[i]) { + return false + } + } + if len(this.Options) != len(that1.Options) { + return false + } + for i := range this.Options { + if !this.Options[i].Equal(that1.Options[i]) { + return false + } + } + if this.Version != that1.Version { + return false + } + if !this.SourceContext.Equal(that1.SourceContext) { + return false + } + if len(this.Mixins) != len(that1.Mixins) { + return false + } + for i := range this.Mixins { + if !this.Mixins[i].Equal(that1.Mixins[i]) { + return false + } + } + if this.Syntax != that1.Syntax { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Method) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Method) + if !ok { + that2, ok := that.(Method) + if 
ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if this.RequestTypeUrl != that1.RequestTypeUrl { + return false + } + if this.RequestStreaming != that1.RequestStreaming { + return false + } + if this.ResponseTypeUrl != that1.ResponseTypeUrl { + return false + } + if this.ResponseStreaming != that1.ResponseStreaming { + return false + } + if len(this.Options) != len(that1.Options) { + return false + } + for i := range this.Options { + if !this.Options[i].Equal(that1.Options[i]) { + return false + } + } + if this.Syntax != that1.Syntax { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Mixin) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Mixin) + if !ok { + that2, ok := that.(Mixin) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if this.Root != that1.Root { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Api) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 11) + s = append(s, "&types.Api{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + if this.Methods != nil { + s = append(s, "Methods: "+fmt.Sprintf("%#v", this.Methods)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + s = append(s, "Version: "+fmt.Sprintf("%#v", this.Version)+",\n") + if this.SourceContext != nil { + s = append(s, "SourceContext: "+fmt.Sprintf("%#v", this.SourceContext)+",\n") + } + if this.Mixins != nil { + s = append(s, "Mixins: "+fmt.Sprintf("%#v", this.Mixins)+",\n") + } + s = append(s, "Syntax: "+fmt.Sprintf("%#v", this.Syntax)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Method) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 11) + s = append(s, "&types.Method{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + s = append(s, "RequestTypeUrl: "+fmt.Sprintf("%#v", this.RequestTypeUrl)+",\n") + s = append(s, "RequestStreaming: "+fmt.Sprintf("%#v", this.RequestStreaming)+",\n") + s = append(s, "ResponseTypeUrl: "+fmt.Sprintf("%#v", this.ResponseTypeUrl)+",\n") + s = append(s, "ResponseStreaming: "+fmt.Sprintf("%#v", this.ResponseStreaming)+",\n") + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + s = append(s, "Syntax: "+fmt.Sprintf("%#v", this.Syntax)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Mixin) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&types.Mixin{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + s = append(s, "Root: "+fmt.Sprintf("%#v", this.Root)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return 
strings.Join(s, "") +} +func valueToGoStringApi(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Api) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Api) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Api) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Syntax != 0 { + i = encodeVarintApi(dAtA, i, uint64(m.Syntax)) + i-- + dAtA[i] = 0x38 + } + if len(m.Mixins) > 0 { + for iNdEx := len(m.Mixins) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Mixins[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + if m.SourceContext != nil { + { + size, err := m.SourceContext.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if len(m.Version) > 0 { + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintApi(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x22 + } + if len(m.Options) > 0 { + for iNdEx := len(m.Options) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Options[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Methods) > 0 { + for iNdEx := len(m.Methods) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Methods[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintApi(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Method) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Method) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Method) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Syntax != 0 { + i = encodeVarintApi(dAtA, i, uint64(m.Syntax)) + i-- + dAtA[i] = 0x38 + } + if len(m.Options) > 0 { + for iNdEx := len(m.Options) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Options[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + if m.ResponseStreaming { + i-- + if m.ResponseStreaming { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if len(m.ResponseTypeUrl) > 0 { + i -= len(m.ResponseTypeUrl) + copy(dAtA[i:], m.ResponseTypeUrl) + i = encodeVarintApi(dAtA, i, uint64(len(m.ResponseTypeUrl))) + i-- + 
dAtA[i] = 0x22 + } + if m.RequestStreaming { + i-- + if m.RequestStreaming { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if len(m.RequestTypeUrl) > 0 { + i -= len(m.RequestTypeUrl) + copy(dAtA[i:], m.RequestTypeUrl) + i = encodeVarintApi(dAtA, i, uint64(len(m.RequestTypeUrl))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintApi(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Mixin) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Mixin) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Mixin) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Root) > 0 { + i -= len(m.Root) + copy(dAtA[i:], m.Root) + i = encodeVarintApi(dAtA, i, uint64(len(m.Root))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintApi(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintApi(dAtA []byte, offset int, v uint64) int { + offset -= sovApi(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func NewPopulatedApi(r randyApi, easy bool) *Api { + this := &Api{} + this.Name = string(randStringApi(r)) + if r.Intn(5) != 0 { + v1 := r.Intn(5) + this.Methods = make([]*Method, v1) + for i := 0; i < v1; i++ { + this.Methods[i] = NewPopulatedMethod(r, easy) + } + } + if r.Intn(5) != 0 { + v2 := r.Intn(5) + this.Options = make([]*Option, v2) + for i := 0; i < v2; i++ { + this.Options[i] = NewPopulatedOption(r, easy) + } + } + this.Version = string(randStringApi(r)) + if r.Intn(5) != 0 { + this.SourceContext = NewPopulatedSourceContext(r, easy) + } + if r.Intn(5) != 0 { + v3 := r.Intn(5) + this.Mixins = make([]*Mixin, v3) + for i := 0; i < v3; i++ { + this.Mixins[i] = NewPopulatedMixin(r, easy) + } + } + this.Syntax = Syntax([]int32{0, 1}[r.Intn(2)]) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedApi(r, 8) + } + return this +} + +func NewPopulatedMethod(r randyApi, easy bool) *Method { + this := &Method{} + this.Name = string(randStringApi(r)) + this.RequestTypeUrl = string(randStringApi(r)) + this.RequestStreaming = bool(bool(r.Intn(2) == 0)) + this.ResponseTypeUrl = string(randStringApi(r)) + this.ResponseStreaming = bool(bool(r.Intn(2) == 0)) + if r.Intn(5) != 0 { + v4 := r.Intn(5) + this.Options = make([]*Option, v4) + for i := 0; i < v4; i++ { + this.Options[i] = NewPopulatedOption(r, easy) + } + } + this.Syntax = Syntax([]int32{0, 1}[r.Intn(2)]) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedApi(r, 8) + } + return this +} + +func NewPopulatedMixin(r randyApi, easy bool) *Mixin { + this := &Mixin{} + this.Name = string(randStringApi(r)) + this.Root = string(randStringApi(r)) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedApi(r, 3) + } + return this +} + +type randyApi interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneApi(r 
randyApi) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringApi(r randyApi) string { + v5 := r.Intn(100) + tmps := make([]rune, v5) + for i := 0; i < v5; i++ { + tmps[i] = randUTF8RuneApi(r) + } + return string(tmps) +} +func randUnrecognizedApi(r randyApi, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldApi(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldApi(dAtA []byte, r randyApi, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = encodeVarintPopulateApi(dAtA, uint64(key)) + v6 := r.Int63() + if r.Intn(2) == 0 { + v6 *= -1 + } + dAtA = encodeVarintPopulateApi(dAtA, uint64(v6)) + case 1: + dAtA = encodeVarintPopulateApi(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateApi(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateApi(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateApi(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateApi(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *Api) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if len(m.Methods) > 0 { + for _, e := range m.Methods { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } + if len(m.Options) > 0 { + for _, e := range m.Options { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } + l = len(m.Version) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.SourceContext != nil { + l = m.SourceContext.Size() + n += 1 + l + sovApi(uint64(l)) + } + if len(m.Mixins) > 0 { + for _, e := range m.Mixins { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } + if m.Syntax != 0 { + n += 1 + sovApi(uint64(m.Syntax)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Method) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.RequestTypeUrl) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.RequestStreaming { + n += 2 + } + l = len(m.ResponseTypeUrl) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.ResponseStreaming { + n += 2 + } + if len(m.Options) > 0 { + for _, e := range m.Options { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } + if m.Syntax != 0 { + n += 1 + sovApi(uint64(m.Syntax)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Mixin) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.Root) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovApi(x uint64) (n int) { + return 
(math_bits.Len64(x|1) + 6) / 7 +} +func sozApi(x uint64) (n int) { + return sovApi(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Api) String() string { + if this == nil { + return "nil" + } + repeatedStringForMethods := "[]*Method{" + for _, f := range this.Methods { + repeatedStringForMethods += strings.Replace(f.String(), "Method", "Method", 1) + "," + } + repeatedStringForMethods += "}" + repeatedStringForOptions := "[]*Option{" + for _, f := range this.Options { + repeatedStringForOptions += strings.Replace(fmt.Sprintf("%v", f), "Option", "Option", 1) + "," + } + repeatedStringForOptions += "}" + repeatedStringForMixins := "[]*Mixin{" + for _, f := range this.Mixins { + repeatedStringForMixins += strings.Replace(f.String(), "Mixin", "Mixin", 1) + "," + } + repeatedStringForMixins += "}" + s := strings.Join([]string{`&Api{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Methods:` + repeatedStringForMethods + `,`, + `Options:` + repeatedStringForOptions + `,`, + `Version:` + fmt.Sprintf("%v", this.Version) + `,`, + `SourceContext:` + strings.Replace(fmt.Sprintf("%v", this.SourceContext), "SourceContext", "SourceContext", 1) + `,`, + `Mixins:` + repeatedStringForMixins + `,`, + `Syntax:` + fmt.Sprintf("%v", this.Syntax) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *Method) String() string { + if this == nil { + return "nil" + } + repeatedStringForOptions := "[]*Option{" + for _, f := range this.Options { + repeatedStringForOptions += strings.Replace(fmt.Sprintf("%v", f), "Option", "Option", 1) + "," + } + repeatedStringForOptions += "}" + s := strings.Join([]string{`&Method{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `RequestTypeUrl:` + fmt.Sprintf("%v", this.RequestTypeUrl) + `,`, + `RequestStreaming:` + fmt.Sprintf("%v", this.RequestStreaming) + `,`, + `ResponseTypeUrl:` + fmt.Sprintf("%v", this.ResponseTypeUrl) + `,`, + `ResponseStreaming:` + fmt.Sprintf("%v", this.ResponseStreaming) + `,`, + `Options:` + repeatedStringForOptions + `,`, + `Syntax:` + fmt.Sprintf("%v", this.Syntax) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *Mixin) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Mixin{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Root:` + fmt.Sprintf("%v", this.Root) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func valueToStringApi(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Api) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Api: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Api: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { 
+ if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Methods", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Methods = append(m.Methods, &Method{}) + if err := m.Methods[len(m.Methods)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Options = append(m.Options, &Option{}) + if err := m.Options[len(m.Options)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceContext", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SourceContext == nil { + m.SourceContext = &SourceContext{} + } + if err := m.SourceContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Mixins", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Mixins = append(m.Mixins, &Mixin{}) + if err := m.Mixins[len(m.Mixins)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Syntax", wireType) + } + m.Syntax = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Syntax |= Syntax(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Method) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Method: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Method: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestTypeUrl", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RequestTypeUrl = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestStreaming", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.RequestStreaming = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResponseTypeUrl", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResponseTypeUrl = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ResponseStreaming", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ResponseStreaming = bool(v != 0) + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Options = append(m.Options, &Option{}) + if err := m.Options[len(m.Options)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Syntax", wireType) + } + m.Syntax = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Syntax |= Syntax(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Mixin) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Mixin: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Mixin: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Root", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Root = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipApi(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowApi + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowApi + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowApi + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthApi + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupApi + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthApi + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthApi = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowApi = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupApi = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/gogo/protobuf/types/doc.go b/vendor/github.com/gogo/protobuf/types/doc.go new file mode 100644 index 000000000..ff2810af1 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/doc.go @@ -0,0 +1,35 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package types contains code for interacting with well-known types. +*/ +package types diff --git a/vendor/github.com/gogo/protobuf/types/duration.go b/vendor/github.com/gogo/protobuf/types/duration.go new file mode 100644 index 000000000..979b8e78a --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/duration.go @@ -0,0 +1,100 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package types + +// This file implements conversions between google.protobuf.Duration +// and time.Duration. + +import ( + "errors" + "fmt" + "time" +) + +const ( + // Range of a Duration in seconds, as specified in + // google/protobuf/duration.proto. This is about 10,000 years in seconds. + maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) + minSeconds = -maxSeconds +) + +// validateDuration determines whether the Duration is valid according to the +// definition in google/protobuf/duration.proto. A valid Duration +// may still be too large to fit into a time.Duration (the range of Duration +// is about 10,000 years, and the range of time.Duration is about 290). 
+func validateDuration(d *Duration) error { + if d == nil { + return errors.New("duration: nil Duration") + } + if d.Seconds < minSeconds || d.Seconds > maxSeconds { + return fmt.Errorf("duration: %#v: seconds out of range", d) + } + if d.Nanos <= -1e9 || d.Nanos >= 1e9 { + return fmt.Errorf("duration: %#v: nanos out of range", d) + } + // Seconds and Nanos must have the same sign, unless d.Nanos is zero. + if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) { + return fmt.Errorf("duration: %#v: seconds and nanos have different signs", d) + } + return nil +} + +// DurationFromProto converts a Duration to a time.Duration. DurationFromProto +// returns an error if the Duration is invalid or is too large to be +// represented in a time.Duration. +func DurationFromProto(p *Duration) (time.Duration, error) { + if err := validateDuration(p); err != nil { + return 0, err + } + d := time.Duration(p.Seconds) * time.Second + if int64(d/time.Second) != p.Seconds { + return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) + } + if p.Nanos != 0 { + d += time.Duration(p.Nanos) * time.Nanosecond + if (d < 0) != (p.Nanos < 0) { + return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) + } + } + return d, nil +} + +// DurationProto converts a time.Duration to a Duration. +func DurationProto(d time.Duration) *Duration { + nanos := d.Nanoseconds() + secs := nanos / 1e9 + nanos -= secs * 1e9 + return &Duration{ + Seconds: secs, + Nanos: int32(nanos), + } +} diff --git a/vendor/github.com/gogo/protobuf/types/duration.pb.go b/vendor/github.com/gogo/protobuf/types/duration.pb.go new file mode 100644 index 000000000..3959f0669 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/duration.pb.go @@ -0,0 +1,520 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: google/protobuf/duration.proto + +package types + +import ( + bytes "bytes" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// A Duration represents a signed, fixed-length span of time represented +// as a count of seconds and fractions of seconds at nanosecond +// resolution. It is independent of any calendar and concepts like "day" +// or "month". It is related to Timestamp in that the difference between +// two Timestamp values is a Duration and it can be added or subtracted +// from a Timestamp. Range is approximately +-10,000 years. +// +// # Examples +// +// Example 1: Compute Duration from two Timestamps in pseudo code. +// +// Timestamp start = ...; +// Timestamp end = ...; +// Duration duration = ...; +// +// duration.seconds = end.seconds - start.seconds; +// duration.nanos = end.nanos - start.nanos; +// +// if (duration.seconds < 0 && duration.nanos > 0) { +// duration.seconds += 1; +// duration.nanos -= 1000000000; +// } else if (durations.seconds > 0 && duration.nanos < 0) { +// duration.seconds -= 1; +// duration.nanos += 1000000000; +// } +// +// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. 
+// +// Timestamp start = ...; +// Duration duration = ...; +// Timestamp end = ...; +// +// end.seconds = start.seconds + duration.seconds; +// end.nanos = start.nanos + duration.nanos; +// +// if (end.nanos < 0) { +// end.seconds -= 1; +// end.nanos += 1000000000; +// } else if (end.nanos >= 1000000000) { +// end.seconds += 1; +// end.nanos -= 1000000000; +// } +// +// Example 3: Compute Duration from datetime.timedelta in Python. +// +// td = datetime.timedelta(days=3, minutes=10) +// duration = Duration() +// duration.FromTimedelta(td) +// +// # JSON Mapping +// +// In JSON format, the Duration type is encoded as a string rather than an +// object, where the string ends in the suffix "s" (indicating seconds) and +// is preceded by the number of seconds, with nanoseconds expressed as +// fractional seconds. For example, 3 seconds with 0 nanoseconds should be +// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should +// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 +// microsecond should be expressed in JSON format as "3.000001s". +// +// +type Duration struct { + // Signed seconds of the span of time. Must be from -315,576,000,000 + // to +315,576,000,000 inclusive. Note: these bounds are computed from: + // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + // Signed fractions of a second at nanosecond resolution of the span + // of time. Durations less than one second are represented with a 0 + // `seconds` field and a positive or negative `nanos` field. For durations + // of one second or more, a non-zero value for the `nanos` field must be + // of the same sign as the `seconds` field. Must be from -999,999,999 + // to +999,999,999 inclusive. 
+ Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Duration) Reset() { *m = Duration{} } +func (*Duration) ProtoMessage() {} +func (*Duration) Descriptor() ([]byte, []int) { + return fileDescriptor_23597b2ebd7ac6c5, []int{0} +} +func (*Duration) XXX_WellKnownType() string { return "Duration" } +func (m *Duration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Duration.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Duration) XXX_Merge(src proto.Message) { + xxx_messageInfo_Duration.Merge(m, src) +} +func (m *Duration) XXX_Size() int { + return m.Size() +} +func (m *Duration) XXX_DiscardUnknown() { + xxx_messageInfo_Duration.DiscardUnknown(m) +} + +var xxx_messageInfo_Duration proto.InternalMessageInfo + +func (m *Duration) GetSeconds() int64 { + if m != nil { + return m.Seconds + } + return 0 +} + +func (m *Duration) GetNanos() int32 { + if m != nil { + return m.Nanos + } + return 0 +} + +func (*Duration) XXX_MessageName() string { + return "google.protobuf.Duration" +} +func init() { + proto.RegisterType((*Duration)(nil), "google.protobuf.Duration") +} + +func init() { proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_23597b2ebd7ac6c5) } + +var fileDescriptor_23597b2ebd7ac6c5 = []byte{ + // 209 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a, + 0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0x56, + 0x5c, 0x1c, 0x2e, 0x50, 0x25, 0x42, 0x12, 0x5c, 0xec, 0xc5, 0xa9, 0xc9, 0xf9, 0x79, 0x29, 0xc5, + 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x30, 0xae, 0x90, 0x08, 0x17, 0x6b, 0x5e, 0x62, 0x5e, + 0x7e, 0xb1, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x6b, 0x10, 0x84, 0xe3, 0x54, 0x7f, 0xe3, 0xa1, 0x1c, + 0xc3, 0x87, 0x87, 0x72, 0x8c, 0x2b, 0x1e, 0xc9, 0x31, 0x9e, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, + 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x2f, 0x1e, 0xc9, 0x31, 0x7c, 0x78, 0x24, 0xc7, 0xb8, 0xe2, + 0xb1, 0x1c, 0xe3, 0x89, 0xc7, 0x72, 0x8c, 0x5c, 0xc2, 0xc9, 0xf9, 0xb9, 0x7a, 0x68, 0x56, 0x3b, + 0xf1, 0xc2, 0x2c, 0x0e, 0x00, 0x89, 0x04, 0x30, 0x46, 0xb1, 0x96, 0x54, 0x16, 0xa4, 0x16, 0xff, + 0x60, 0x64, 0x5c, 0xc4, 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0xa2, 0x25, 0x00, + 0xaa, 0x45, 0x2f, 0x3c, 0x35, 0x27, 0xc7, 0x3b, 0x2f, 0xbf, 0x3c, 0x2f, 0x04, 0xa4, 0x32, 0x89, + 0x0d, 0x6c, 0x96, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x8a, 0x1c, 0x64, 0x4e, 0xf6, 0x00, 0x00, + 0x00, +} + +func (this *Duration) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Duration) + if !ok { + that2, ok := that.(Duration) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Seconds != that1.Seconds { + if this.Seconds < that1.Seconds { + return -1 + } + return 1 + } + if this.Nanos != that1.Nanos { + if this.Nanos < that1.Nanos { + return -1 + } + return 1 + } + if c := 
bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Duration) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Duration) + if !ok { + that2, ok := that.(Duration) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Seconds != that1.Seconds { + return false + } + if this.Nanos != that1.Nanos { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Duration) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&types.Duration{") + s = append(s, "Seconds: "+fmt.Sprintf("%#v", this.Seconds)+",\n") + s = append(s, "Nanos: "+fmt.Sprintf("%#v", this.Nanos)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringDuration(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Duration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Duration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Duration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Nanos != 0 { + i = encodeVarintDuration(dAtA, i, uint64(m.Nanos)) + i-- + dAtA[i] = 0x10 + } + if m.Seconds != 0 { + i = encodeVarintDuration(dAtA, i, uint64(m.Seconds)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintDuration(dAtA []byte, offset int, v uint64) int { + offset -= sovDuration(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Duration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Seconds != 0 { + n += 1 + sovDuration(uint64(m.Seconds)) + } + if m.Nanos != 0 { + n += 1 + sovDuration(uint64(m.Nanos)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovDuration(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozDuration(x uint64) (n int) { + return sovDuration(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Duration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDuration + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Duration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Duration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if 
wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Seconds", wireType) + } + m.Seconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDuration + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Seconds |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Nanos", wireType) + } + m.Nanos = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDuration + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Nanos |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipDuration(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDuration + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthDuration + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipDuration(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDuration + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDuration + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDuration + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthDuration + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupDuration + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthDuration + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthDuration = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowDuration = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupDuration = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/gogo/protobuf/types/duration_gogo.go b/vendor/github.com/gogo/protobuf/types/duration_gogo.go new file mode 100644 index 000000000..90e7670e2 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/duration_gogo.go @@ -0,0 +1,100 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2016, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package types + +import ( + "fmt" + "time" +) + +func NewPopulatedDuration(r interface { + Int63() int64 +}, easy bool) *Duration { + this := &Duration{} + maxSecs := time.Hour.Nanoseconds() / 1e9 + max := 2 * maxSecs + s := int64(r.Int63()) % max + s -= maxSecs + neg := int64(1) + if s < 0 { + neg = -1 + } + this.Seconds = s + this.Nanos = int32(neg * (r.Int63() % 1e9)) + return this +} + +func (d *Duration) String() string { + td, err := DurationFromProto(d) + if err != nil { + return fmt.Sprintf("(%v)", err) + } + return td.String() +} + +func NewPopulatedStdDuration(r interface { + Int63() int64 +}, easy bool) *time.Duration { + dur := NewPopulatedDuration(r, easy) + d, err := DurationFromProto(dur) + if err != nil { + return nil + } + return &d +} + +func SizeOfStdDuration(d time.Duration) int { + dur := DurationProto(d) + return dur.Size() +} + +func StdDurationMarshal(d time.Duration) ([]byte, error) { + size := SizeOfStdDuration(d) + buf := make([]byte, size) + _, err := StdDurationMarshalTo(d, buf) + return buf, err +} + +func StdDurationMarshalTo(d time.Duration, data []byte) (int, error) { + dur := DurationProto(d) + return dur.MarshalTo(data) +} + +func StdDurationUnmarshal(d *time.Duration, data []byte) error { + dur := &Duration{} + if err := dur.Unmarshal(data); err != nil { + return err + } + dd, err := DurationFromProto(dur) + if err != nil { + return err + } + *d = dd + return nil +} diff --git a/vendor/github.com/gogo/protobuf/types/empty.pb.go b/vendor/github.com/gogo/protobuf/types/empty.pb.go new file mode 100644 index 000000000..17e3aa558 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/empty.pb.go @@ -0,0 +1,465 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: google/protobuf/empty.proto + +package types + +import ( + bytes "bytes" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// A generic empty message that you can re-use to avoid defining duplicated +// empty messages in your APIs. A typical example is to use it as the request +// or the response type of an API method. For instance: +// +// service Foo { +// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); +// } +// +// The JSON representation for `Empty` is empty JSON object `{}`. +type Empty struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Empty) Reset() { *m = Empty{} } +func (*Empty) ProtoMessage() {} +func (*Empty) Descriptor() ([]byte, []int) { + return fileDescriptor_900544acb223d5b8, []int{0} +} +func (*Empty) XXX_WellKnownType() string { return "Empty" } +func (m *Empty) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Empty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Empty.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Empty) XXX_Merge(src proto.Message) { + xxx_messageInfo_Empty.Merge(m, src) +} +func (m *Empty) XXX_Size() int { + return m.Size() +} +func (m *Empty) XXX_DiscardUnknown() { + xxx_messageInfo_Empty.DiscardUnknown(m) +} + +var xxx_messageInfo_Empty proto.InternalMessageInfo + +func (*Empty) XXX_MessageName() string { + return "google.protobuf.Empty" +} +func init() { + proto.RegisterType((*Empty)(nil), "google.protobuf.Empty") +} + +func init() { proto.RegisterFile("google/protobuf/empty.proto", fileDescriptor_900544acb223d5b8) } + +var fileDescriptor_900544acb223d5b8 = []byte{ + // 176 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcd, 0x2d, 0x28, + 0xa9, 0xd4, 0x03, 0x73, 0x85, 0xf8, 0x21, 0x92, 0x7a, 0x30, 0x49, 0x25, 0x76, 0x2e, 0x56, 0x57, + 0x90, 0xbc, 0x53, 0x0b, 0xe3, 0x8d, 0x87, 0x72, 0x0c, 0x1f, 0x1e, 0xca, 0x31, 0xfe, 0x78, 0x28, + 0xc7, 0xd8, 0xf0, 0x48, 0x8e, 0x71, 0xc5, 0x23, 0x39, 0xc6, 0x13, 0x8f, 0xe4, 0x18, 0x2f, 0x3c, + 0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0xf1, 0xc5, 0x23, 0x39, 0x86, 0x0f, 0x20, 0xf1, 0xc7, 0x72, + 0x8c, 0x27, 0x1e, 0xcb, 0x31, 0x72, 0x09, 0x27, 0xe7, 0xe7, 0xea, 0xa1, 0x19, 0xe8, 0xc4, 0x05, + 0x36, 0x2e, 0x00, 0xc4, 0x0d, 0x60, 0x8c, 0x62, 0x2d, 0xa9, 0x2c, 0x48, 0x2d, 0xfe, 0xc1, 0xc8, + 0xb8, 0x88, 0x89, 0xd9, 0x3d, 0xc0, 0x69, 0x15, 0x93, 0x9c, 0x3b, 0x44, 0x7d, 0x00, 0x54, 0xbd, + 0x5e, 0x78, 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x65, 0x12, 0x1b, 0xd8, + 0x20, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x21, 0xbe, 0xb6, 0x31, 0xc6, 0x00, 0x00, 0x00, +} + +func (this *Empty) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Empty) + if !ok { + that2, ok := that.(Empty) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Empty) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Empty) + if !ok { + that2, ok := 
that.(Empty) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Empty) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 4) + s = append(s, "&types.Empty{") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringEmpty(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Empty) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Empty) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Empty) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func encodeVarintEmpty(dAtA []byte, offset int, v uint64) int { + offset -= sovEmpty(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func NewPopulatedEmpty(r randyEmpty, easy bool) *Empty { + this := &Empty{} + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedEmpty(r, 1) + } + return this +} + +type randyEmpty interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneEmpty(r randyEmpty) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringEmpty(r randyEmpty) string { + v1 := r.Intn(100) + tmps := make([]rune, v1) + for i := 0; i < v1; i++ { + tmps[i] = randUTF8RuneEmpty(r) + } + return string(tmps) +} +func randUnrecognizedEmpty(r randyEmpty, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldEmpty(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldEmpty(dAtA []byte, r randyEmpty, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = encodeVarintPopulateEmpty(dAtA, uint64(key)) + v2 := r.Int63() + if r.Intn(2) == 0 { + v2 *= -1 + } + dAtA = encodeVarintPopulateEmpty(dAtA, uint64(v2)) + case 1: + dAtA = encodeVarintPopulateEmpty(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateEmpty(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateEmpty(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateEmpty(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), 
byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateEmpty(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *Empty) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovEmpty(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozEmpty(x uint64) (n int) { + return sovEmpty(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Empty) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Empty{`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func valueToStringEmpty(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Empty) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEmpty + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Empty: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Empty: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipEmpty(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEmpty + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthEmpty + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipEmpty(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEmpty + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEmpty + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEmpty + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthEmpty + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupEmpty + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthEmpty + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthEmpty = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowEmpty = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupEmpty = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/gogo/protobuf/types/field_mask.pb.go b/vendor/github.com/gogo/protobuf/types/field_mask.pb.go new file mode 100644 index 000000000..7226b57f7 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/field_mask.pb.go @@ -0,0 +1,741 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: google/protobuf/field_mask.proto + +package types + +import ( + bytes "bytes" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// `FieldMask` represents a set of symbolic field paths, for example: +// +// paths: "f.a" +// paths: "f.b.d" +// +// Here `f` represents a field in some root message, `a` and `b` +// fields in the message found in `f`, and `d` a field found in the +// message in `f.b`. +// +// Field masks are used to specify a subset of fields that should be +// returned by a get operation or modified by an update operation. +// Field masks also have a custom JSON encoding (see below). +// +// # Field Masks in Projections +// +// When used in the context of a projection, a response message or +// sub-message is filtered by the API to only contain those fields as +// specified in the mask. 
For example, if the mask in the previous +// example is applied to a response message as follows: +// +// f { +// a : 22 +// b { +// d : 1 +// x : 2 +// } +// y : 13 +// } +// z: 8 +// +// The result will not contain specific values for fields x,y and z +// (their value will be set to the default, and omitted in proto text +// output): +// +// +// f { +// a : 22 +// b { +// d : 1 +// } +// } +// +// A repeated field is not allowed except at the last position of a +// paths string. +// +// If a FieldMask object is not present in a get operation, the +// operation applies to all fields (as if a FieldMask of all fields +// had been specified). +// +// Note that a field mask does not necessarily apply to the +// top-level response message. In case of a REST get operation, the +// field mask applies directly to the response, but in case of a REST +// list operation, the mask instead applies to each individual message +// in the returned resource list. In case of a REST custom method, +// other definitions may be used. Where the mask applies will be +// clearly documented together with its declaration in the API. In +// any case, the effect on the returned resource/resources is required +// behavior for APIs. +// +// # Field Masks in Update Operations +// +// A field mask in update operations specifies which fields of the +// targeted resource are going to be updated. The API is required +// to only change the values of the fields as specified in the mask +// and leave the others untouched. If a resource is passed in to +// describe the updated values, the API ignores the values of all +// fields not covered by the mask. +// +// If a repeated field is specified for an update operation, new values will +// be appended to the existing repeated field in the target resource. Note that +// a repeated field is only allowed in the last position of a `paths` string. +// +// If a sub-message is specified in the last position of the field mask for an +// update operation, then new value will be merged into the existing sub-message +// in the target resource. +// +// For example, given the target message: +// +// f { +// b { +// d: 1 +// x: 2 +// } +// c: [1] +// } +// +// And an update message: +// +// f { +// b { +// d: 10 +// } +// c: [2] +// } +// +// then if the field mask is: +// +// paths: ["f.b", "f.c"] +// +// then the result will be: +// +// f { +// b { +// d: 10 +// x: 2 +// } +// c: [1, 2] +// } +// +// An implementation may provide options to override this default behavior for +// repeated and message fields. +// +// In order to reset a field's value to the default, the field must +// be in the mask and set to the default value in the provided resource. +// Hence, in order to reset all fields of a resource, provide a default +// instance of the resource and set all fields in the mask, or do +// not provide a mask as described below. +// +// If a field mask is not present on update, the operation applies to +// all fields (as if a field mask of all fields has been specified). +// Note that in the presence of schema evolution, this may mean that +// fields the client does not know and has therefore not filled into +// the request will be reset to their default. If this is unwanted +// behavior, a specific service may require a client to always specify +// a field mask, producing an error if not. +// +// As with get operations, the location of the resource which +// describes the updated values in the request message depends on the +// operation kind. 
In any case, the effect of the field mask is +// required to be honored by the API. +// +// ## Considerations for HTTP REST +// +// The HTTP kind of an update operation which uses a field mask must +// be set to PATCH instead of PUT in order to satisfy HTTP semantics +// (PUT must only be used for full updates). +// +// # JSON Encoding of Field Masks +// +// In JSON, a field mask is encoded as a single string where paths are +// separated by a comma. Fields name in each path are converted +// to/from lower-camel naming conventions. +// +// As an example, consider the following message declarations: +// +// message Profile { +// User user = 1; +// Photo photo = 2; +// } +// message User { +// string display_name = 1; +// string address = 2; +// } +// +// In proto a field mask for `Profile` may look as such: +// +// mask { +// paths: "user.display_name" +// paths: "photo" +// } +// +// In JSON, the same mask is represented as below: +// +// { +// mask: "user.displayName,photo" +// } +// +// # Field Masks and Oneof Fields +// +// Field masks treat fields in oneofs just as regular fields. Consider the +// following message: +// +// message SampleMessage { +// oneof test_oneof { +// string name = 4; +// SubMessage sub_message = 9; +// } +// } +// +// The field mask can be: +// +// mask { +// paths: "name" +// } +// +// Or: +// +// mask { +// paths: "sub_message" +// } +// +// Note that oneof type names ("test_oneof" in this case) cannot be used in +// paths. +// +// ## Field Mask Verification +// +// The implementation of any API method which has a FieldMask type field in the +// request should verify the included field paths, and return an +// `INVALID_ARGUMENT` error if any path is duplicated or unmappable. +type FieldMask struct { + // The set of field mask paths. 
+ Paths []string `protobuf:"bytes,1,rep,name=paths,proto3" json:"paths,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FieldMask) Reset() { *m = FieldMask{} } +func (*FieldMask) ProtoMessage() {} +func (*FieldMask) Descriptor() ([]byte, []int) { + return fileDescriptor_5158202634f0da48, []int{0} +} +func (m *FieldMask) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FieldMask) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_FieldMask.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *FieldMask) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldMask.Merge(m, src) +} +func (m *FieldMask) XXX_Size() int { + return m.Size() +} +func (m *FieldMask) XXX_DiscardUnknown() { + xxx_messageInfo_FieldMask.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldMask proto.InternalMessageInfo + +func (m *FieldMask) GetPaths() []string { + if m != nil { + return m.Paths + } + return nil +} + +func (*FieldMask) XXX_MessageName() string { + return "google.protobuf.FieldMask" +} +func init() { + proto.RegisterType((*FieldMask)(nil), "google.protobuf.FieldMask") +} + +func init() { proto.RegisterFile("google/protobuf/field_mask.proto", fileDescriptor_5158202634f0da48) } + +var fileDescriptor_5158202634f0da48 = []byte{ + // 203 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x48, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcb, 0x4c, 0xcd, + 0x49, 0x89, 0xcf, 0x4d, 0x2c, 0xce, 0xd6, 0x03, 0x8b, 0x09, 0xf1, 0x43, 0x54, 0xe8, 0xc1, 0x54, + 0x28, 0x29, 0x72, 0x71, 0xba, 0x81, 0x14, 0xf9, 0x26, 0x16, 0x67, 0x0b, 0x89, 0x70, 0xb1, 0x16, + 0x24, 0x96, 0x64, 0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6b, 0x70, 0x06, 0x41, 0x38, 0x4e, 0x1d, 0x8c, + 0x37, 0x1e, 0xca, 0x31, 0x7c, 0x78, 0x28, 0xc7, 0xf8, 0xe3, 0xa1, 0x1c, 0x63, 0xc3, 0x23, 0x39, + 0xc6, 0x15, 0x8f, 0xe4, 0x18, 0x4f, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23, + 0x39, 0xc6, 0x17, 0x8f, 0xe4, 0x18, 0x3e, 0x80, 0xc4, 0x1f, 0xcb, 0x31, 0x9e, 0x78, 0x2c, 0xc7, + 0xc8, 0x25, 0x9c, 0x9c, 0x9f, 0xab, 0x87, 0x66, 0x95, 0x13, 0x1f, 0xdc, 0xa2, 0x00, 0x90, 0x50, + 0x00, 0x63, 0x14, 0x6b, 0x49, 0x65, 0x41, 0x6a, 0xf1, 0x0f, 0x46, 0xc6, 0x45, 0x4c, 0xcc, 0xee, + 0x01, 0x4e, 0xab, 0x98, 0xe4, 0xdc, 0x21, 0x7a, 0x02, 0xa0, 0x7a, 0xf4, 0xc2, 0x53, 0x73, 0x72, + 0xbc, 0xf3, 0xf2, 0xcb, 0xf3, 0x42, 0x40, 0x2a, 0x93, 0xd8, 0xc0, 0x86, 0x19, 0x03, 0x02, 0x00, + 0x00, 0xff, 0xff, 0x43, 0xa0, 0x83, 0xd0, 0xe9, 0x00, 0x00, 0x00, +} + +func (this *FieldMask) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*FieldMask) + if !ok { + that2, ok := that.(FieldMask) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if len(this.Paths) != len(that1.Paths) { + if len(this.Paths) < len(that1.Paths) { + return -1 + } + return 1 + } + for i := range this.Paths { + if this.Paths[i] != that1.Paths[i] { + if this.Paths[i] < that1.Paths[i] { + return -1 + } + return 1 + } + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this 
*FieldMask) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*FieldMask) + if !ok { + that2, ok := that.(FieldMask) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Paths) != len(that1.Paths) { + return false + } + for i := range this.Paths { + if this.Paths[i] != that1.Paths[i] { + return false + } + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *FieldMask) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.FieldMask{") + s = append(s, "Paths: "+fmt.Sprintf("%#v", this.Paths)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringFieldMask(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *FieldMask) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FieldMask) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FieldMask) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Paths) > 0 { + for iNdEx := len(m.Paths) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Paths[iNdEx]) + copy(dAtA[i:], m.Paths[iNdEx]) + i = encodeVarintFieldMask(dAtA, i, uint64(len(m.Paths[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintFieldMask(dAtA []byte, offset int, v uint64) int { + offset -= sovFieldMask(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func NewPopulatedFieldMask(r randyFieldMask, easy bool) *FieldMask { + this := &FieldMask{} + v1 := r.Intn(10) + this.Paths = make([]string, v1) + for i := 0; i < v1; i++ { + this.Paths[i] = string(randStringFieldMask(r)) + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedFieldMask(r, 2) + } + return this +} + +type randyFieldMask interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneFieldMask(r randyFieldMask) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringFieldMask(r randyFieldMask) string { + v2 := r.Intn(100) + tmps := make([]rune, v2) + for i := 0; i < v2; i++ { + tmps[i] = randUTF8RuneFieldMask(r) + } + return string(tmps) +} +func randUnrecognizedFieldMask(r randyFieldMask, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldFieldMask(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldFieldMask(dAtA []byte, r randyFieldMask, fieldNumber int, wire int) []byte { + key := 
uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(key)) + v3 := r.Int63() + if r.Intn(2) == 0 { + v3 *= -1 + } + dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(v3)) + case 1: + dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateFieldMask(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *FieldMask) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Paths) > 0 { + for _, s := range m.Paths { + l = len(s) + n += 1 + l + sovFieldMask(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovFieldMask(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozFieldMask(x uint64) (n int) { + return sovFieldMask(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *FieldMask) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FieldMask{`, + `Paths:` + fmt.Sprintf("%v", this.Paths) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func valueToStringFieldMask(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *FieldMask) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFieldMask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FieldMask: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FieldMask: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFieldMask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthFieldMask + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthFieldMask + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Paths = append(m.Paths, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipFieldMask(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return 
ErrInvalidLengthFieldMask + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthFieldMask + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipFieldMask(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFieldMask + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFieldMask + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFieldMask + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthFieldMask + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupFieldMask + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthFieldMask + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthFieldMask = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowFieldMask = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupFieldMask = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/gogo/protobuf/types/protosize.go b/vendor/github.com/gogo/protobuf/types/protosize.go new file mode 100644 index 000000000..3a2d1b7e1 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/protosize.go @@ -0,0 +1,34 @@ +package types + +func (m *Any) ProtoSize() (n int) { return m.Size() } +func (m *Api) ProtoSize() (n int) { return m.Size() } +func (m *Method) ProtoSize() (n int) { return m.Size() } +func (m *Mixin) ProtoSize() (n int) { return m.Size() } +func (m *Duration) ProtoSize() (n int) { return m.Size() } +func (m *Empty) ProtoSize() (n int) { return m.Size() } +func (m *FieldMask) ProtoSize() (n int) { return m.Size() } +func (m *SourceContext) ProtoSize() (n int) { return m.Size() } +func (m *Struct) ProtoSize() (n int) { return m.Size() } +func (m *Value) ProtoSize() (n int) { return m.Size() } +func (m *Value_NullValue) ProtoSize() (n int) { return m.Size() } +func (m *Value_NumberValue) ProtoSize() (n int) { return m.Size() } +func (m *Value_StringValue) ProtoSize() (n int) { return m.Size() } +func (m *Value_BoolValue) ProtoSize() (n int) { return m.Size() } +func (m *Value_StructValue) ProtoSize() (n int) { return m.Size() } +func (m *Value_ListValue) ProtoSize() (n int) { return m.Size() } +func (m *ListValue) ProtoSize() (n int) { return m.Size() } +func (m *Timestamp) ProtoSize() (n int) { return m.Size() } +func (m *Type) ProtoSize() (n int) { return m.Size() } +func (m *Field) ProtoSize() (n int) { return m.Size() } +func (m *Enum) ProtoSize() (n int) { return m.Size() } +func (m *EnumValue) ProtoSize() (n int) { 
return m.Size() } +func (m *Option) ProtoSize() (n int) { return m.Size() } +func (m *DoubleValue) ProtoSize() (n int) { return m.Size() } +func (m *FloatValue) ProtoSize() (n int) { return m.Size() } +func (m *Int64Value) ProtoSize() (n int) { return m.Size() } +func (m *UInt64Value) ProtoSize() (n int) { return m.Size() } +func (m *Int32Value) ProtoSize() (n int) { return m.Size() } +func (m *UInt32Value) ProtoSize() (n int) { return m.Size() } +func (m *BoolValue) ProtoSize() (n int) { return m.Size() } +func (m *StringValue) ProtoSize() (n int) { return m.Size() } +func (m *BytesValue) ProtoSize() (n int) { return m.Size() } diff --git a/vendor/github.com/gogo/protobuf/types/source_context.pb.go b/vendor/github.com/gogo/protobuf/types/source_context.pb.go new file mode 100644 index 000000000..61045ce10 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/source_context.pb.go @@ -0,0 +1,527 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: google/protobuf/source_context.proto + +package types + +import ( + bytes "bytes" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// `SourceContext` represents information about the source of a +// protobuf element, like the file in which it is defined. +type SourceContext struct { + // The path-qualified name of the .proto file that contained the associated + // protobuf element. For example: `"google/protobuf/source_context.proto"`. 
+ FileName string `protobuf:"bytes,1,opt,name=file_name,json=fileName,proto3" json:"file_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SourceContext) Reset() { *m = SourceContext{} } +func (*SourceContext) ProtoMessage() {} +func (*SourceContext) Descriptor() ([]byte, []int) { + return fileDescriptor_b686cdb126d509db, []int{0} +} +func (m *SourceContext) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SourceContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SourceContext.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SourceContext) XXX_Merge(src proto.Message) { + xxx_messageInfo_SourceContext.Merge(m, src) +} +func (m *SourceContext) XXX_Size() int { + return m.Size() +} +func (m *SourceContext) XXX_DiscardUnknown() { + xxx_messageInfo_SourceContext.DiscardUnknown(m) +} + +var xxx_messageInfo_SourceContext proto.InternalMessageInfo + +func (m *SourceContext) GetFileName() string { + if m != nil { + return m.FileName + } + return "" +} + +func (*SourceContext) XXX_MessageName() string { + return "google.protobuf.SourceContext" +} +func init() { + proto.RegisterType((*SourceContext)(nil), "google.protobuf.SourceContext") +} + +func init() { + proto.RegisterFile("google/protobuf/source_context.proto", fileDescriptor_b686cdb126d509db) +} + +var fileDescriptor_b686cdb126d509db = []byte{ + // 212 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x49, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xce, 0x2f, 0x2d, + 0x4a, 0x4e, 0x8d, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xad, 0x28, 0xd1, 0x03, 0x8b, 0x0b, 0xf1, 0x43, + 0x54, 0xe9, 0xc1, 0x54, 0x29, 0xe9, 0x70, 0xf1, 0x06, 0x83, 0x15, 0x3a, 0x43, 0xd4, 0x09, 0x49, + 0x73, 0x71, 0xa6, 0x65, 0xe6, 0xa4, 0xc6, 0xe7, 0x25, 0xe6, 0xa6, 0x4a, 0x30, 0x2a, 0x30, 0x6a, + 0x70, 0x06, 0x71, 0x80, 0x04, 0xfc, 0x12, 0x73, 0x53, 0x9d, 0x3a, 0x19, 0x6f, 0x3c, 0x94, 0x63, + 0xf8, 0xf0, 0x50, 0x8e, 0xf1, 0xc7, 0x43, 0x39, 0xc6, 0x86, 0x47, 0x72, 0x8c, 0x2b, 0x1e, 0xc9, + 0x31, 0x9e, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x2f, 0x1e, + 0xc9, 0x31, 0x7c, 0x00, 0x89, 0x3f, 0x96, 0x63, 0x3c, 0xf1, 0x58, 0x8e, 0x91, 0x4b, 0x38, 0x39, + 0x3f, 0x57, 0x0f, 0xcd, 0x56, 0x27, 0x21, 0x14, 0x3b, 0x03, 0x40, 0xc2, 0x01, 0x8c, 0x51, 0xac, + 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c, 0x56, 0x31, 0xc9, 0xb9, 0x43, + 0x34, 0x05, 0x40, 0x35, 0xe9, 0x85, 0xa7, 0xe6, 0xe4, 0x78, 0xe7, 0xe5, 0x97, 0xe7, 0x85, 0x80, + 0x94, 0x25, 0xb1, 0x81, 0x4d, 0x33, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xb8, 0x37, 0x2a, 0xa1, + 0xf9, 0x00, 0x00, 0x00, +} + +func (this *SourceContext) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*SourceContext) + if !ok { + that2, ok := that.(SourceContext) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.FileName != that1.FileName { + if this.FileName < that1.FileName { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + 
return 0 +} +func (this *SourceContext) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*SourceContext) + if !ok { + that2, ok := that.(SourceContext) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.FileName != that1.FileName { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *SourceContext) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.SourceContext{") + s = append(s, "FileName: "+fmt.Sprintf("%#v", this.FileName)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringSourceContext(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *SourceContext) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SourceContext) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SourceContext) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.FileName) > 0 { + i -= len(m.FileName) + copy(dAtA[i:], m.FileName) + i = encodeVarintSourceContext(dAtA, i, uint64(len(m.FileName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintSourceContext(dAtA []byte, offset int, v uint64) int { + offset -= sovSourceContext(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func NewPopulatedSourceContext(r randySourceContext, easy bool) *SourceContext { + this := &SourceContext{} + this.FileName = string(randStringSourceContext(r)) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedSourceContext(r, 2) + } + return this +} + +type randySourceContext interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneSourceContext(r randySourceContext) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringSourceContext(r randySourceContext) string { + v1 := r.Intn(100) + tmps := make([]rune, v1) + for i := 0; i < v1; i++ { + tmps[i] = randUTF8RuneSourceContext(r) + } + return string(tmps) +} +func randUnrecognizedSourceContext(r randySourceContext, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldSourceContext(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldSourceContext(dAtA []byte, r randySourceContext, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = 
encodeVarintPopulateSourceContext(dAtA, uint64(key)) + v2 := r.Int63() + if r.Intn(2) == 0 { + v2 *= -1 + } + dAtA = encodeVarintPopulateSourceContext(dAtA, uint64(v2)) + case 1: + dAtA = encodeVarintPopulateSourceContext(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateSourceContext(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateSourceContext(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateSourceContext(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateSourceContext(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *SourceContext) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.FileName) + if l > 0 { + n += 1 + l + sovSourceContext(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovSourceContext(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozSourceContext(x uint64) (n int) { + return sovSourceContext(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *SourceContext) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SourceContext{`, + `FileName:` + fmt.Sprintf("%v", this.FileName) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func valueToStringSourceContext(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *SourceContext) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSourceContext + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SourceContext: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SourceContext: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FileName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSourceContext + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSourceContext + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthSourceContext + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FileName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSourceContext(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSourceContext + } + 
if (iNdEx + skippy) < 0 { + return ErrInvalidLengthSourceContext + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipSourceContext(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSourceContext + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSourceContext + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSourceContext + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthSourceContext + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupSourceContext + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthSourceContext + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthSourceContext = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowSourceContext = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupSourceContext = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/gogo/protobuf/types/struct.pb.go b/vendor/github.com/gogo/protobuf/types/struct.pb.go new file mode 100644 index 000000000..cea553eef --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/struct.pb.go @@ -0,0 +1,2280 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: google/protobuf/struct.proto + +package types + +import ( + bytes "bytes" + encoding_binary "encoding/binary" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strconv "strconv" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// `NullValue` is a singleton enumeration to represent the null value for the +// `Value` type union. +// +// The JSON representation for `NullValue` is JSON `null`. +type NullValue int32 + +const ( + // Null value. 
+ NullValue_NULL_VALUE NullValue = 0 +) + +var NullValue_name = map[int32]string{ + 0: "NULL_VALUE", +} + +var NullValue_value = map[string]int32{ + "NULL_VALUE": 0, +} + +func (NullValue) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_df322afd6c9fb402, []int{0} +} + +func (NullValue) XXX_WellKnownType() string { return "NullValue" } + +// `Struct` represents a structured data value, consisting of fields +// which map to dynamically typed values. In some languages, `Struct` +// might be supported by a native representation. For example, in +// scripting languages like JS a struct is represented as an +// object. The details of that representation are described together +// with the proto support for the language. +// +// The JSON representation for `Struct` is JSON object. +type Struct struct { + // Unordered map of dynamically typed values. + Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Struct) Reset() { *m = Struct{} } +func (*Struct) ProtoMessage() {} +func (*Struct) Descriptor() ([]byte, []int) { + return fileDescriptor_df322afd6c9fb402, []int{0} +} +func (*Struct) XXX_WellKnownType() string { return "Struct" } +func (m *Struct) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Struct) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Struct.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Struct) XXX_Merge(src proto.Message) { + xxx_messageInfo_Struct.Merge(m, src) +} +func (m *Struct) XXX_Size() int { + return m.Size() +} +func (m *Struct) XXX_DiscardUnknown() { + xxx_messageInfo_Struct.DiscardUnknown(m) +} + +var xxx_messageInfo_Struct proto.InternalMessageInfo + +func (m *Struct) GetFields() map[string]*Value { + if m != nil { + return m.Fields + } + return nil +} + +func (*Struct) XXX_MessageName() string { + return "google.protobuf.Struct" +} + +// `Value` represents a dynamically typed value which can be either +// null, a number, a string, a boolean, a recursive struct value, or a +// list of values. A producer of value is expected to set one of that +// variants, absence of any variant indicates an error. +// +// The JSON representation for `Value` is JSON value. +type Value struct { + // The kind of value. 
+ // + // Types that are valid to be assigned to Kind: + // *Value_NullValue + // *Value_NumberValue + // *Value_StringValue + // *Value_BoolValue + // *Value_StructValue + // *Value_ListValue + Kind isValue_Kind `protobuf_oneof:"kind"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Value) Reset() { *m = Value{} } +func (*Value) ProtoMessage() {} +func (*Value) Descriptor() ([]byte, []int) { + return fileDescriptor_df322afd6c9fb402, []int{1} +} +func (*Value) XXX_WellKnownType() string { return "Value" } +func (m *Value) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Value.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_Value.Merge(m, src) +} +func (m *Value) XXX_Size() int { + return m.Size() +} +func (m *Value) XXX_DiscardUnknown() { + xxx_messageInfo_Value.DiscardUnknown(m) +} + +var xxx_messageInfo_Value proto.InternalMessageInfo + +type isValue_Kind interface { + isValue_Kind() + Equal(interface{}) bool + MarshalTo([]byte) (int, error) + Size() int + Compare(interface{}) int +} + +type Value_NullValue struct { + NullValue NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof" json:"null_value,omitempty"` +} +type Value_NumberValue struct { + NumberValue float64 `protobuf:"fixed64,2,opt,name=number_value,json=numberValue,proto3,oneof" json:"number_value,omitempty"` +} +type Value_StringValue struct { + StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,proto3,oneof" json:"string_value,omitempty"` +} +type Value_BoolValue struct { + BoolValue bool `protobuf:"varint,4,opt,name=bool_value,json=boolValue,proto3,oneof" json:"bool_value,omitempty"` +} +type Value_StructValue struct { + StructValue *Struct `protobuf:"bytes,5,opt,name=struct_value,json=structValue,proto3,oneof" json:"struct_value,omitempty"` +} +type Value_ListValue struct { + ListValue *ListValue `protobuf:"bytes,6,opt,name=list_value,json=listValue,proto3,oneof" json:"list_value,omitempty"` +} + +func (*Value_NullValue) isValue_Kind() {} +func (*Value_NumberValue) isValue_Kind() {} +func (*Value_StringValue) isValue_Kind() {} +func (*Value_BoolValue) isValue_Kind() {} +func (*Value_StructValue) isValue_Kind() {} +func (*Value_ListValue) isValue_Kind() {} + +func (m *Value) GetKind() isValue_Kind { + if m != nil { + return m.Kind + } + return nil +} + +func (m *Value) GetNullValue() NullValue { + if x, ok := m.GetKind().(*Value_NullValue); ok { + return x.NullValue + } + return NullValue_NULL_VALUE +} + +func (m *Value) GetNumberValue() float64 { + if x, ok := m.GetKind().(*Value_NumberValue); ok { + return x.NumberValue + } + return 0 +} + +func (m *Value) GetStringValue() string { + if x, ok := m.GetKind().(*Value_StringValue); ok { + return x.StringValue + } + return "" +} + +func (m *Value) GetBoolValue() bool { + if x, ok := m.GetKind().(*Value_BoolValue); ok { + return x.BoolValue + } + return false +} + +func (m *Value) GetStructValue() *Struct { + if x, ok := m.GetKind().(*Value_StructValue); ok { + return x.StructValue + } + return nil +} + +func (m *Value) GetListValue() *ListValue { + if x, ok := m.GetKind().(*Value_ListValue); ok { + return 
x.ListValue + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*Value) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Value_NullValue)(nil), + (*Value_NumberValue)(nil), + (*Value_StringValue)(nil), + (*Value_BoolValue)(nil), + (*Value_StructValue)(nil), + (*Value_ListValue)(nil), + } +} + +func (*Value) XXX_MessageName() string { + return "google.protobuf.Value" +} + +// `ListValue` is a wrapper around a repeated field of values. +// +// The JSON representation for `ListValue` is JSON array. +type ListValue struct { + // Repeated field of dynamically typed values. + Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListValue) Reset() { *m = ListValue{} } +func (*ListValue) ProtoMessage() {} +func (*ListValue) Descriptor() ([]byte, []int) { + return fileDescriptor_df322afd6c9fb402, []int{2} +} +func (*ListValue) XXX_WellKnownType() string { return "ListValue" } +func (m *ListValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ListValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ListValue.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ListValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListValue.Merge(m, src) +} +func (m *ListValue) XXX_Size() int { + return m.Size() +} +func (m *ListValue) XXX_DiscardUnknown() { + xxx_messageInfo_ListValue.DiscardUnknown(m) +} + +var xxx_messageInfo_ListValue proto.InternalMessageInfo + +func (m *ListValue) GetValues() []*Value { + if m != nil { + return m.Values + } + return nil +} + +func (*ListValue) XXX_MessageName() string { + return "google.protobuf.ListValue" +} +func init() { + proto.RegisterEnum("google.protobuf.NullValue", NullValue_name, NullValue_value) + proto.RegisterType((*Struct)(nil), "google.protobuf.Struct") + proto.RegisterMapType((map[string]*Value)(nil), "google.protobuf.Struct.FieldsEntry") + proto.RegisterType((*Value)(nil), "google.protobuf.Value") + proto.RegisterType((*ListValue)(nil), "google.protobuf.ListValue") +} + +func init() { proto.RegisterFile("google/protobuf/struct.proto", fileDescriptor_df322afd6c9fb402) } + +var fileDescriptor_df322afd6c9fb402 = []byte{ + // 443 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0xb1, 0x6f, 0xd3, 0x40, + 0x14, 0xc6, 0xfd, 0x9c, 0xc6, 0x22, 0xcf, 0xa8, 0x54, 0x87, 0x04, 0x51, 0x41, 0x47, 0x94, 0x2e, + 0x11, 0x42, 0xae, 0x14, 0x16, 0x44, 0x58, 0x88, 0x54, 0x5a, 0x89, 0xa8, 0x32, 0x86, 0x16, 0x89, + 0x25, 0xc2, 0xae, 0x1b, 0x59, 0xbd, 0xde, 0x55, 0xf6, 0x1d, 0x28, 0x1b, 0x0b, 0xff, 0x03, 0x33, + 0x13, 0x62, 0xe4, 0xaf, 0xe8, 0xc8, 0xc8, 0x48, 0xdc, 0x85, 0xb1, 0x63, 0x47, 0x74, 0x77, 0xb6, + 0x41, 0x8d, 0xb2, 0xf9, 0x7d, 0xf7, 0x7b, 0xdf, 0x7b, 0xdf, 0x33, 0xde, 0x9f, 0x09, 0x31, 0x63, + 0xe9, 0xf6, 0x59, 0x2e, 0xa4, 0x88, 0xd5, 0xf1, 0x76, 0x21, 0x73, 0x95, 0xc8, 0xc0, 0xd4, 0xe4, + 0x96, 0x7d, 0x0d, 0xea, 0xd7, 0xfe, 0x17, 0x40, 0xef, 0xb5, 0x21, 0xc8, 0x08, 0xbd, 0xe3, 0x2c, + 0x65, 0x47, 0x45, 0x17, 0x7a, 0xad, 0x81, 0x3f, 0xdc, 0x0a, 0xae, 0xc1, 0x81, 0x05, 0x83, 0x17, + 0x86, 0xda, 0xe1, 0x32, 0x9f, 0x47, 0x55, 0xcb, 0xe6, 0x2b, 0xf4, 0xff, 0x93, 0xc9, 0x06, 0xb6, + 0x4e, 0xd2, 
0x79, 0x17, 0x7a, 0x30, 0xe8, 0x44, 0xfa, 0x93, 0x3c, 0xc2, 0xf6, 0x87, 0xf7, 0x4c, + 0xa5, 0x5d, 0xb7, 0x07, 0x03, 0x7f, 0x78, 0x67, 0xc9, 0xfc, 0x50, 0xbf, 0x46, 0x16, 0x7a, 0xea, + 0x3e, 0x81, 0xfe, 0x0f, 0x17, 0xdb, 0x46, 0x24, 0x23, 0x44, 0xae, 0x18, 0x9b, 0x5a, 0x03, 0x6d, + 0xba, 0x3e, 0xdc, 0x5c, 0x32, 0xd8, 0x57, 0x8c, 0x19, 0x7e, 0xcf, 0x89, 0x3a, 0xbc, 0x2e, 0xc8, + 0x16, 0xde, 0xe4, 0xea, 0x34, 0x4e, 0xf3, 0xe9, 0xbf, 0xf9, 0xb0, 0xe7, 0x44, 0xbe, 0x55, 0x1b, + 0xa8, 0x90, 0x79, 0xc6, 0x67, 0x15, 0xd4, 0xd2, 0x8b, 0x6b, 0xc8, 0xaa, 0x16, 0x7a, 0x80, 0x18, + 0x0b, 0x51, 0xaf, 0xb1, 0xd6, 0x83, 0xc1, 0x0d, 0x3d, 0x4a, 0x6b, 0x16, 0x78, 0x66, 0x5c, 0x54, + 0x22, 0x2b, 0xa4, 0x6d, 0xa2, 0xde, 0x5d, 0x71, 0xc7, 0xca, 0x5e, 0x25, 0xb2, 0x49, 0xc9, 0xb2, + 0xa2, 0xee, 0xf5, 0x4c, 0xef, 0x72, 0xca, 0x49, 0x56, 0xc8, 0x26, 0x25, 0xab, 0x8b, 0xb1, 0x87, + 0x6b, 0x27, 0x19, 0x3f, 0xea, 0x8f, 0xb0, 0xd3, 0x10, 0x24, 0x40, 0xcf, 0x98, 0xd5, 0x7f, 0x74, + 0xd5, 0xd1, 0x2b, 0xea, 0xe1, 0x3d, 0xec, 0x34, 0x47, 0x24, 0xeb, 0x88, 0xfb, 0x07, 0x93, 0xc9, + 0xf4, 0xf0, 0xf9, 0xe4, 0x60, 0x67, 0xc3, 0x19, 0x7f, 0x86, 0x5f, 0x0b, 0xea, 0x5c, 0x2e, 0x28, + 0x5c, 0x2d, 0x28, 0x7c, 0x2a, 0x29, 0x7c, 0x2b, 0x29, 0x9c, 0x97, 0x14, 0x7e, 0x96, 0x14, 0x7e, + 0x97, 0x14, 0xfe, 0x94, 0xd4, 0xb9, 0xd4, 0xfa, 0x05, 0x85, 0xf3, 0x0b, 0x0a, 0x78, 0x3b, 0x11, + 0xa7, 0xd7, 0x47, 0x8e, 0x7d, 0x9b, 0x3e, 0xd4, 0x75, 0x08, 0xef, 0xda, 0x72, 0x7e, 0x96, 0x16, + 0x57, 0x00, 0x5f, 0xdd, 0xd6, 0x6e, 0x38, 0xfe, 0xee, 0xd2, 0x5d, 0xdb, 0x10, 0xd6, 0x3b, 0xbe, + 0x4d, 0x19, 0x7b, 0xc9, 0xc5, 0x47, 0xfe, 0x46, 0x93, 0xb1, 0x67, 0x9c, 0x1e, 0xff, 0x0d, 0x00, + 0x00, 0xff, 0xff, 0x26, 0x30, 0xdb, 0xbe, 0xe9, 0x02, 0x00, 0x00, +} + +func (this *Struct) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Struct) + if !ok { + that2, ok := that.(Struct) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if len(this.Fields) != len(that1.Fields) { + if len(this.Fields) < len(that1.Fields) { + return -1 + } + return 1 + } + for i := range this.Fields { + if c := this.Fields[i].Compare(that1.Fields[i]); c != 0 { + return c + } + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Value) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Value) + if !ok { + that2, ok := that.(Value) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if that1.Kind == nil { + if this.Kind != nil { + return 1 + } + } else if this.Kind == nil { + return -1 + } else { + thisType := -1 + switch this.Kind.(type) { + case *Value_NullValue: + thisType = 0 + case *Value_NumberValue: + thisType = 1 + case *Value_StringValue: + thisType = 2 + case *Value_BoolValue: + thisType = 3 + case *Value_StructValue: + thisType = 4 + case *Value_ListValue: + thisType = 5 + default: + panic(fmt.Sprintf("compare: unexpected type %T in oneof", this.Kind)) + } + that1Type := -1 + switch that1.Kind.(type) { + case *Value_NullValue: + that1Type = 0 + case *Value_NumberValue: + that1Type = 1 + case *Value_StringValue: + that1Type = 2 + case *Value_BoolValue: + that1Type = 3 + case *Value_StructValue: + that1Type = 4 + case *Value_ListValue: 
+ that1Type = 5 + default: + panic(fmt.Sprintf("compare: unexpected type %T in oneof", that1.Kind)) + } + if thisType == that1Type { + if c := this.Kind.Compare(that1.Kind); c != 0 { + return c + } + } else if thisType < that1Type { + return -1 + } else if thisType > that1Type { + return 1 + } + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Value_NullValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Value_NullValue) + if !ok { + that2, ok := that.(Value_NullValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.NullValue != that1.NullValue { + if this.NullValue < that1.NullValue { + return -1 + } + return 1 + } + return 0 +} +func (this *Value_NumberValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Value_NumberValue) + if !ok { + that2, ok := that.(Value_NumberValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.NumberValue != that1.NumberValue { + if this.NumberValue < that1.NumberValue { + return -1 + } + return 1 + } + return 0 +} +func (this *Value_StringValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Value_StringValue) + if !ok { + that2, ok := that.(Value_StringValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.StringValue != that1.StringValue { + if this.StringValue < that1.StringValue { + return -1 + } + return 1 + } + return 0 +} +func (this *Value_BoolValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Value_BoolValue) + if !ok { + that2, ok := that.(Value_BoolValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.BoolValue != that1.BoolValue { + if !this.BoolValue { + return -1 + } + return 1 + } + return 0 +} +func (this *Value_StructValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Value_StructValue) + if !ok { + that2, ok := that.(Value_StructValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if c := this.StructValue.Compare(that1.StructValue); c != 0 { + return c + } + return 0 +} +func (this *Value_ListValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Value_ListValue) + if !ok { + that2, ok := that.(Value_ListValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if c := this.ListValue.Compare(that1.ListValue); c != 0 { + return c + } + return 0 +} +func (this *ListValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok 
:= that.(*ListValue) + if !ok { + that2, ok := that.(ListValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if len(this.Values) != len(that1.Values) { + if len(this.Values) < len(that1.Values) { + return -1 + } + return 1 + } + for i := range this.Values { + if c := this.Values[i].Compare(that1.Values[i]); c != 0 { + return c + } + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (x NullValue) String() string { + s, ok := NullValue_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (this *Struct) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Struct) + if !ok { + that2, ok := that.(Struct) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Fields) != len(that1.Fields) { + return false + } + for i := range this.Fields { + if !this.Fields[i].Equal(that1.Fields[i]) { + return false + } + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Value) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Value) + if !ok { + that2, ok := that.(Value) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if that1.Kind == nil { + if this.Kind != nil { + return false + } + } else if this.Kind == nil { + return false + } else if !this.Kind.Equal(that1.Kind) { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Value_NullValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Value_NullValue) + if !ok { + that2, ok := that.(Value_NullValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.NullValue != that1.NullValue { + return false + } + return true +} +func (this *Value_NumberValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Value_NumberValue) + if !ok { + that2, ok := that.(Value_NumberValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.NumberValue != that1.NumberValue { + return false + } + return true +} +func (this *Value_StringValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Value_StringValue) + if !ok { + that2, ok := that.(Value_StringValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.StringValue != that1.StringValue { + return false + } + return true +} +func (this *Value_BoolValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Value_BoolValue) + if !ok { + that2, ok := that.(Value_BoolValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.BoolValue != that1.BoolValue { + return false + } + 
return true +} +func (this *Value_StructValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Value_StructValue) + if !ok { + that2, ok := that.(Value_StructValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.StructValue.Equal(that1.StructValue) { + return false + } + return true +} +func (this *Value_ListValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Value_ListValue) + if !ok { + that2, ok := that.(Value_ListValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.ListValue.Equal(that1.ListValue) { + return false + } + return true +} +func (this *ListValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ListValue) + if !ok { + that2, ok := that.(ListValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Values) != len(that1.Values) { + return false + } + for i := range this.Values { + if !this.Values[i].Equal(that1.Values[i]) { + return false + } + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Struct) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.Struct{") + keysForFields := make([]string, 0, len(this.Fields)) + for k := range this.Fields { + keysForFields = append(keysForFields, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForFields) + mapStringForFields := "map[string]*Value{" + for _, k := range keysForFields { + mapStringForFields += fmt.Sprintf("%#v: %#v,", k, this.Fields[k]) + } + mapStringForFields += "}" + if this.Fields != nil { + s = append(s, "Fields: "+mapStringForFields+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Value) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 10) + s = append(s, "&types.Value{") + if this.Kind != nil { + s = append(s, "Kind: "+fmt.Sprintf("%#v", this.Kind)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Value_NullValue) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&types.Value_NullValue{` + + `NullValue:` + fmt.Sprintf("%#v", this.NullValue) + `}`}, ", ") + return s +} +func (this *Value_NumberValue) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&types.Value_NumberValue{` + + `NumberValue:` + fmt.Sprintf("%#v", this.NumberValue) + `}`}, ", ") + return s +} +func (this *Value_StringValue) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&types.Value_StringValue{` + + `StringValue:` + fmt.Sprintf("%#v", this.StringValue) + `}`}, ", ") + return s +} +func (this *Value_BoolValue) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&types.Value_BoolValue{` + + `BoolValue:` + fmt.Sprintf("%#v", this.BoolValue) + `}`}, ", ") + 
return s +} +func (this *Value_StructValue) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&types.Value_StructValue{` + + `StructValue:` + fmt.Sprintf("%#v", this.StructValue) + `}`}, ", ") + return s +} +func (this *Value_ListValue) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&types.Value_ListValue{` + + `ListValue:` + fmt.Sprintf("%#v", this.ListValue) + `}`}, ", ") + return s +} +func (this *ListValue) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.ListValue{") + if this.Values != nil { + s = append(s, "Values: "+fmt.Sprintf("%#v", this.Values)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringStruct(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Struct) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Struct) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Struct) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Fields) > 0 { + for k := range m.Fields { + v := m.Fields[k] + baseI := i + if v != nil { + { + size, err := v.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintStruct(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintStruct(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintStruct(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Value) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Value) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Value) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Kind != nil { + { + size := m.Kind.Size() + i -= size + if _, err := m.Kind.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + return len(dAtA) - i, nil +} + +func (m *Value_NullValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Value_NullValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i = encodeVarintStruct(dAtA, i, uint64(m.NullValue)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} +func (m *Value_NumberValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Value_NumberValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i -= 8 + 
encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.NumberValue)))) + i-- + dAtA[i] = 0x11 + return len(dAtA) - i, nil +} +func (m *Value_StringValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Value_StringValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.StringValue) + copy(dAtA[i:], m.StringValue) + i = encodeVarintStruct(dAtA, i, uint64(len(m.StringValue))) + i-- + dAtA[i] = 0x1a + return len(dAtA) - i, nil +} +func (m *Value_BoolValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Value_BoolValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i-- + if m.BoolValue { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + return len(dAtA) - i, nil +} +func (m *Value_StructValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Value_StructValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.StructValue != nil { + { + size, err := m.StructValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintStruct(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *Value_ListValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Value_ListValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ListValue != nil { + { + size, err := m.ListValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintStruct(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + return len(dAtA) - i, nil +} +func (m *ListValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ListValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Values) > 0 { + for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Values[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintStruct(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintStruct(dAtA []byte, offset int, v uint64) int { + offset -= sovStruct(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func NewPopulatedStruct(r randyStruct, easy bool) *Struct { + this := &Struct{} + if r.Intn(5) == 0 { + v1 := r.Intn(10) + this.Fields = make(map[string]*Value) + for i := 0; i < v1; i++ { + this.Fields[randStringStruct(r)] = NewPopulatedValue(r, easy) + } + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedStruct(r, 2) + } + return this +} + +func NewPopulatedValue(r randyStruct, easy bool) *Value { + this := &Value{} + oneofNumber_Kind := []int32{1, 2, 3, 4, 5, 6}[r.Intn(6)] + switch oneofNumber_Kind { + case 1: + this.Kind = 
NewPopulatedValue_NullValue(r, easy) + case 2: + this.Kind = NewPopulatedValue_NumberValue(r, easy) + case 3: + this.Kind = NewPopulatedValue_StringValue(r, easy) + case 4: + this.Kind = NewPopulatedValue_BoolValue(r, easy) + case 5: + this.Kind = NewPopulatedValue_StructValue(r, easy) + case 6: + this.Kind = NewPopulatedValue_ListValue(r, easy) + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedStruct(r, 7) + } + return this +} + +func NewPopulatedValue_NullValue(r randyStruct, easy bool) *Value_NullValue { + this := &Value_NullValue{} + this.NullValue = NullValue([]int32{0}[r.Intn(1)]) + return this +} +func NewPopulatedValue_NumberValue(r randyStruct, easy bool) *Value_NumberValue { + this := &Value_NumberValue{} + this.NumberValue = float64(r.Float64()) + if r.Intn(2) == 0 { + this.NumberValue *= -1 + } + return this +} +func NewPopulatedValue_StringValue(r randyStruct, easy bool) *Value_StringValue { + this := &Value_StringValue{} + this.StringValue = string(randStringStruct(r)) + return this +} +func NewPopulatedValue_BoolValue(r randyStruct, easy bool) *Value_BoolValue { + this := &Value_BoolValue{} + this.BoolValue = bool(bool(r.Intn(2) == 0)) + return this +} +func NewPopulatedValue_StructValue(r randyStruct, easy bool) *Value_StructValue { + this := &Value_StructValue{} + this.StructValue = NewPopulatedStruct(r, easy) + return this +} +func NewPopulatedValue_ListValue(r randyStruct, easy bool) *Value_ListValue { + this := &Value_ListValue{} + this.ListValue = NewPopulatedListValue(r, easy) + return this +} +func NewPopulatedListValue(r randyStruct, easy bool) *ListValue { + this := &ListValue{} + if r.Intn(5) == 0 { + v2 := r.Intn(5) + this.Values = make([]*Value, v2) + for i := 0; i < v2; i++ { + this.Values[i] = NewPopulatedValue(r, easy) + } + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedStruct(r, 2) + } + return this +} + +type randyStruct interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneStruct(r randyStruct) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringStruct(r randyStruct) string { + v3 := r.Intn(100) + tmps := make([]rune, v3) + for i := 0; i < v3; i++ { + tmps[i] = randUTF8RuneStruct(r) + } + return string(tmps) +} +func randUnrecognizedStruct(r randyStruct, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldStruct(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldStruct(dAtA []byte, r randyStruct, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = encodeVarintPopulateStruct(dAtA, uint64(key)) + v4 := r.Int63() + if r.Intn(2) == 0 { + v4 *= -1 + } + dAtA = encodeVarintPopulateStruct(dAtA, uint64(v4)) + case 1: + dAtA = encodeVarintPopulateStruct(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateStruct(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateStruct(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = 
encodeVarintPopulateStruct(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateStruct(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *Struct) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Fields) > 0 { + for k, v := range m.Fields { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovStruct(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovStruct(uint64(len(k))) + l + n += mapEntrySize + 1 + sovStruct(uint64(mapEntrySize)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Value) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Kind != nil { + n += m.Kind.Size() + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Value_NullValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovStruct(uint64(m.NullValue)) + return n +} +func (m *Value_NumberValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 9 + return n +} +func (m *Value_StringValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.StringValue) + n += 1 + l + sovStruct(uint64(l)) + return n +} +func (m *Value_BoolValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + return n +} +func (m *Value_StructValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.StructValue != nil { + l = m.StructValue.Size() + n += 1 + l + sovStruct(uint64(l)) + } + return n +} +func (m *Value_ListValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ListValue != nil { + l = m.ListValue.Size() + n += 1 + l + sovStruct(uint64(l)) + } + return n +} +func (m *ListValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Values) > 0 { + for _, e := range m.Values { + l = e.Size() + n += 1 + l + sovStruct(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovStruct(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozStruct(x uint64) (n int) { + return sovStruct(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Struct) String() string { + if this == nil { + return "nil" + } + keysForFields := make([]string, 0, len(this.Fields)) + for k := range this.Fields { + keysForFields = append(keysForFields, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForFields) + mapStringForFields := "map[string]*Value{" + for _, k := range keysForFields { + mapStringForFields += fmt.Sprintf("%v: %v,", k, this.Fields[k]) + } + mapStringForFields += "}" + s := strings.Join([]string{`&Struct{`, + `Fields:` + mapStringForFields + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *Value) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Value{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *Value_NullValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Value_NullValue{`, + `NullValue:` + fmt.Sprintf("%v", this.NullValue) + `,`, + `}`, 
+ }, "") + return s +} +func (this *Value_NumberValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Value_NumberValue{`, + `NumberValue:` + fmt.Sprintf("%v", this.NumberValue) + `,`, + `}`, + }, "") + return s +} +func (this *Value_StringValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Value_StringValue{`, + `StringValue:` + fmt.Sprintf("%v", this.StringValue) + `,`, + `}`, + }, "") + return s +} +func (this *Value_BoolValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Value_BoolValue{`, + `BoolValue:` + fmt.Sprintf("%v", this.BoolValue) + `,`, + `}`, + }, "") + return s +} +func (this *Value_StructValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Value_StructValue{`, + `StructValue:` + strings.Replace(fmt.Sprintf("%v", this.StructValue), "Struct", "Struct", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Value_ListValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Value_ListValue{`, + `ListValue:` + strings.Replace(fmt.Sprintf("%v", this.ListValue), "ListValue", "ListValue", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ListValue) String() string { + if this == nil { + return "nil" + } + repeatedStringForValues := "[]*Value{" + for _, f := range this.Values { + repeatedStringForValues += strings.Replace(f.String(), "Value", "Value", 1) + "," + } + repeatedStringForValues += "}" + s := strings.Join([]string{`&ListValue{`, + `Values:` + repeatedStringForValues + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func valueToStringStruct(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Struct) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Struct: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Struct: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Fields", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStruct + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthStruct + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Fields == nil { + m.Fields = make(map[string]*Value) + } + var mapkey string + var mapvalue *Value + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum 
:= int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthStruct + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthStruct + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthStruct + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthStruct + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &Value{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipStruct(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthStruct + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Fields[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipStruct(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthStruct + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthStruct + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Value) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NullValue", wireType) + } + var v NullValue + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= NullValue(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Kind = &Value_NullValue{v} + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field NumberValue", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Kind = &Value_NumberValue{float64(math.Float64frombits(v))} + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StringValue", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthStruct + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthStruct + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = &Value_StringValue{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BoolValue", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Kind = &Value_BoolValue{b} + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StructValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStruct + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthStruct + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Struct{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Kind = &Value_StructValue{v} + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStruct + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthStruct + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ListValue{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Kind = &Value_ListValue{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipStruct(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthStruct + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthStruct + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStruct + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthStruct + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Values = append(m.Values, &Value{}) + if err := m.Values[len(m.Values)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipStruct(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthStruct + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthStruct + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipStruct(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowStruct + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowStruct + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowStruct + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthStruct + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupStruct + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthStruct + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthStruct = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowStruct = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupStruct = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/gogo/protobuf/types/timestamp.go b/vendor/github.com/gogo/protobuf/types/timestamp.go new file mode 100644 index 000000000..232ada57c --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/timestamp.go @@ -0,0 +1,130 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package types + +// This file implements operations on google.protobuf.Timestamp. + +import ( + "errors" + "fmt" + "time" +) + +const ( + // Seconds field of the earliest valid Timestamp. + // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + minValidSeconds = -62135596800 + // Seconds field just after the latest valid Timestamp. + // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + maxValidSeconds = 253402300800 +) + +// validateTimestamp determines whether a Timestamp is valid. +// A valid timestamp represents a time in the range +// [0001-01-01, 10000-01-01) and has a Nanos field +// in the range [0, 1e9). +// +// If the Timestamp is valid, validateTimestamp returns nil. +// Otherwise, it returns an error that describes +// the problem. +// +// Every valid Timestamp can be represented by a time.Time, but the converse is not true. +func validateTimestamp(ts *Timestamp) error { + if ts == nil { + return errors.New("timestamp: nil Timestamp") + } + if ts.Seconds < minValidSeconds { + return fmt.Errorf("timestamp: %#v before 0001-01-01", ts) + } + if ts.Seconds >= maxValidSeconds { + return fmt.Errorf("timestamp: %#v after 10000-01-01", ts) + } + if ts.Nanos < 0 || ts.Nanos >= 1e9 { + return fmt.Errorf("timestamp: %#v: nanos not in range [0, 1e9)", ts) + } + return nil +} + +// TimestampFromProto converts a google.protobuf.Timestamp proto to a time.Time. +// It returns an error if the argument is invalid. +// +// Unlike most Go functions, if Timestamp returns an error, the first return value +// is not the zero time.Time. Instead, it is the value obtained from the +// time.Unix function when passed the contents of the Timestamp, in the UTC +// locale. This may or may not be a meaningful time; many invalid Timestamps +// do map to valid time.Times. +// +// A nil Timestamp returns an error. The first return value in that case is +// undefined. +func TimestampFromProto(ts *Timestamp) (time.Time, error) { + // Don't return the zero value on error, because corresponds to a valid + // timestamp. Instead return whatever time.Unix gives us. + var t time.Time + if ts == nil { + t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp + } else { + t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC() + } + return t, validateTimestamp(ts) +} + +// TimestampNow returns a google.protobuf.Timestamp for the current time. +func TimestampNow() *Timestamp { + ts, err := TimestampProto(time.Now()) + if err != nil { + panic("ptypes: time.Now() out of Timestamp range") + } + return ts +} + +// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. +// It returns an error if the resulting Timestamp is invalid. +func TimestampProto(t time.Time) (*Timestamp, error) { + ts := &Timestamp{ + Seconds: t.Unix(), + Nanos: int32(t.Nanosecond()), + } + if err := validateTimestamp(ts); err != nil { + return nil, err + } + return ts, nil +} + +// TimestampString returns the RFC 3339 string for valid Timestamps. 
For invalid +// Timestamps, it returns an error message in parentheses. +func TimestampString(ts *Timestamp) string { + t, err := TimestampFromProto(ts) + if err != nil { + return fmt.Sprintf("(%v)", err) + } + return t.Format(time.RFC3339Nano) +} diff --git a/vendor/github.com/gogo/protobuf/types/timestamp.pb.go b/vendor/github.com/gogo/protobuf/types/timestamp.pb.go new file mode 100644 index 000000000..b81875267 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/timestamp.pb.go @@ -0,0 +1,542 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: google/protobuf/timestamp.proto + +package types + +import ( + bytes "bytes" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// A Timestamp represents a point in time independent of any time zone or local +// calendar, encoded as a count of seconds and fractions of seconds at +// nanosecond resolution. The count is relative to an epoch at UTC midnight on +// January 1, 1970, in the proleptic Gregorian calendar which extends the +// Gregorian calendar backwards to year one. +// +// All minutes are 60 seconds long. Leap seconds are "smeared" so that no leap +// second table is needed for interpretation, using a [24-hour linear +// smear](https://developers.google.com/time/smear). +// +// The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. By +// restricting to that range, we ensure that we can convert to and from [RFC +// 3339](https://www.ietf.org/rfc/rfc3339.txt) date strings. +// +// # Examples +// +// Example 1: Compute Timestamp from POSIX `time()`. +// +// Timestamp timestamp; +// timestamp.set_seconds(time(NULL)); +// timestamp.set_nanos(0); +// +// Example 2: Compute Timestamp from POSIX `gettimeofday()`. +// +// struct timeval tv; +// gettimeofday(&tv, NULL); +// +// Timestamp timestamp; +// timestamp.set_seconds(tv.tv_sec); +// timestamp.set_nanos(tv.tv_usec * 1000); +// +// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. +// +// FILETIME ft; +// GetSystemTimeAsFileTime(&ft); +// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; +// +// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z +// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. +// Timestamp timestamp; +// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); +// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); +// +// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. +// +// long millis = System.currentTimeMillis(); +// +// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) +// .setNanos((int) ((millis % 1000) * 1000000)).build(); +// +// +// Example 5: Compute Timestamp from current time in Python. +// +// timestamp = Timestamp() +// timestamp.GetCurrentTime() +// +// # JSON Mapping +// +// In JSON format, the Timestamp type is encoded as a string in the +// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. 
That is, the +// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" +// where {year} is always expressed using four digits while {month}, {day}, +// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional +// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), +// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone +// is required. A proto3 JSON serializer should always use UTC (as indicated by +// "Z") when printing the Timestamp type and a proto3 JSON parser should be +// able to accept both UTC and other timezones (as indicated by an offset). +// +// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past +// 01:30 UTC on January 15, 2017. +// +// In JavaScript, one can convert a Date object to this format using the +// standard +// [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString) +// method. In Python, a standard `datetime.datetime` object can be converted +// to this format using +// [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with +// the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use +// the Joda Time's [`ISODateTimeFormat.dateTime()`]( +// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D +// ) to obtain a formatter capable of generating timestamps in this format. +// +// +type Timestamp struct { + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. 
+ Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Timestamp) Reset() { *m = Timestamp{} } +func (*Timestamp) ProtoMessage() {} +func (*Timestamp) Descriptor() ([]byte, []int) { + return fileDescriptor_292007bbfe81227e, []int{0} +} +func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" } +func (m *Timestamp) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Timestamp) XXX_Merge(src proto.Message) { + xxx_messageInfo_Timestamp.Merge(m, src) +} +func (m *Timestamp) XXX_Size() int { + return m.Size() +} +func (m *Timestamp) XXX_DiscardUnknown() { + xxx_messageInfo_Timestamp.DiscardUnknown(m) +} + +var xxx_messageInfo_Timestamp proto.InternalMessageInfo + +func (m *Timestamp) GetSeconds() int64 { + if m != nil { + return m.Seconds + } + return 0 +} + +func (m *Timestamp) GetNanos() int32 { + if m != nil { + return m.Nanos + } + return 0 +} + +func (*Timestamp) XXX_MessageName() string { + return "google.protobuf.Timestamp" +} +func init() { + proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp") +} + +func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_292007bbfe81227e) } + +var fileDescriptor_292007bbfe81227e = []byte{ + // 212 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d, + 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0xd0, 0x03, 0x0b, 0x09, 0xf1, 0x43, 0x14, 0xe8, 0xc1, 0x14, 0x28, + 0x59, 0x73, 0x71, 0x86, 0xc0, 0xd4, 0x08, 0x49, 0x70, 0xb1, 0x17, 0xa7, 0x26, 0xe7, 0xe7, 0xa5, + 0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0xc1, 0xb8, 0x42, 0x22, 0x5c, 0xac, 0x79, 0x89, + 0x79, 0xf9, 0xc5, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xac, 0x41, 0x10, 0x8e, 0x53, 0x03, 0xe3, 0x8d, + 0x87, 0x72, 0x0c, 0x1f, 0x1e, 0xca, 0x31, 0xae, 0x78, 0x24, 0xc7, 0x78, 0xe2, 0x91, 0x1c, 0xe3, + 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0xbe, 0x78, 0x24, 0xc7, 0xf0, 0xe1, 0x91, 0x1c, + 0xe3, 0x8a, 0xc7, 0x72, 0x8c, 0x27, 0x1e, 0xcb, 0x31, 0x72, 0x09, 0x27, 0xe7, 0xe7, 0xea, 0xa1, + 0x59, 0xee, 0xc4, 0x07, 0xb7, 0x3a, 0x00, 0x24, 0x14, 0xc0, 0x18, 0xc5, 0x5a, 0x52, 0x59, 0x90, + 0x5a, 0xfc, 0x83, 0x91, 0x71, 0x11, 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, + 0x9e, 0x00, 0xa8, 0x1e, 0xbd, 0xf0, 0xd4, 0x9c, 0x1c, 0xef, 0xbc, 0xfc, 0xf2, 0xbc, 0x10, 0x90, + 0xca, 0x24, 0x36, 0xb0, 0x61, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x0b, 0x23, 0x83, 0xdd, + 0xfa, 0x00, 0x00, 0x00, +} + +func (this *Timestamp) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Timestamp) + if !ok { + that2, ok := that.(Timestamp) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Seconds != that1.Seconds { + if this.Seconds < that1.Seconds { + return -1 + } + return 1 + } + if this.Nanos != that1.Nanos { + if this.Nanos < that1.Nanos { 
+ return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Timestamp) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Timestamp) + if !ok { + that2, ok := that.(Timestamp) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Seconds != that1.Seconds { + return false + } + if this.Nanos != that1.Nanos { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Timestamp) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&types.Timestamp{") + s = append(s, "Seconds: "+fmt.Sprintf("%#v", this.Seconds)+",\n") + s = append(s, "Nanos: "+fmt.Sprintf("%#v", this.Nanos)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringTimestamp(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Timestamp) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Timestamp) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Timestamp) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Nanos != 0 { + i = encodeVarintTimestamp(dAtA, i, uint64(m.Nanos)) + i-- + dAtA[i] = 0x10 + } + if m.Seconds != 0 { + i = encodeVarintTimestamp(dAtA, i, uint64(m.Seconds)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintTimestamp(dAtA []byte, offset int, v uint64) int { + offset -= sovTimestamp(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Timestamp) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Seconds != 0 { + n += 1 + sovTimestamp(uint64(m.Seconds)) + } + if m.Nanos != 0 { + n += 1 + sovTimestamp(uint64(m.Nanos)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovTimestamp(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTimestamp(x uint64) (n int) { + return sovTimestamp(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Timestamp) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTimestamp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Timestamp: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Timestamp: illegal tag %d (wire type 
%d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Seconds", wireType) + } + m.Seconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTimestamp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Seconds |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Nanos", wireType) + } + m.Nanos = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTimestamp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Nanos |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTimestamp(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTimestamp + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTimestamp + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTimestamp(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTimestamp + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTimestamp + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTimestamp + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthTimestamp + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupTimestamp + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthTimestamp + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthTimestamp = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTimestamp = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupTimestamp = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/gogo/protobuf/types/timestamp_gogo.go b/vendor/github.com/gogo/protobuf/types/timestamp_gogo.go new file mode 100644 index 000000000..e03fa1315 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/timestamp_gogo.go @@ -0,0 +1,94 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2016, The GoGo Authors. All rights reserved. 
+// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package types + +import ( + "time" +) + +func NewPopulatedTimestamp(r interface { + Int63() int64 +}, easy bool) *Timestamp { + this := &Timestamp{} + ns := int64(r.Int63()) + this.Seconds = ns / 1e9 + this.Nanos = int32(ns % 1e9) + return this +} + +func (ts *Timestamp) String() string { + return TimestampString(ts) +} + +func NewPopulatedStdTime(r interface { + Int63() int64 +}, easy bool) *time.Time { + timestamp := NewPopulatedTimestamp(r, easy) + t, err := TimestampFromProto(timestamp) + if err != nil { + return nil + } + return &t +} + +func SizeOfStdTime(t time.Time) int { + ts, err := TimestampProto(t) + if err != nil { + return 0 + } + return ts.Size() +} + +func StdTimeMarshal(t time.Time) ([]byte, error) { + size := SizeOfStdTime(t) + buf := make([]byte, size) + _, err := StdTimeMarshalTo(t, buf) + return buf, err +} + +func StdTimeMarshalTo(t time.Time, data []byte) (int, error) { + ts, err := TimestampProto(t) + if err != nil { + return 0, err + } + return ts.MarshalTo(data) +} + +func StdTimeUnmarshal(t *time.Time, data []byte) error { + ts := &Timestamp{} + if err := ts.Unmarshal(data); err != nil { + return err + } + tt, err := TimestampFromProto(ts) + if err != nil { + return err + } + *t = tt + return nil +} diff --git a/vendor/github.com/gogo/protobuf/types/type.pb.go b/vendor/github.com/gogo/protobuf/types/type.pb.go new file mode 100644 index 000000000..13b7ec02f --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/type.pb.go @@ -0,0 +1,3370 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: google/protobuf/type.proto + +package types + +import ( + bytes "bytes" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strconv "strconv" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// The syntax in which a protocol buffer element is defined. +type Syntax int32 + +const ( + // Syntax `proto2`. + Syntax_SYNTAX_PROTO2 Syntax = 0 + // Syntax `proto3`. + Syntax_SYNTAX_PROTO3 Syntax = 1 +) + +var Syntax_name = map[int32]string{ + 0: "SYNTAX_PROTO2", + 1: "SYNTAX_PROTO3", +} + +var Syntax_value = map[string]int32{ + "SYNTAX_PROTO2": 0, + "SYNTAX_PROTO3": 1, +} + +func (Syntax) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_dd271cc1e348c538, []int{0} +} + +// Basic field types. +type Field_Kind int32 + +const ( + // Field type unknown. + Field_TYPE_UNKNOWN Field_Kind = 0 + // Field type double. + Field_TYPE_DOUBLE Field_Kind = 1 + // Field type float. + Field_TYPE_FLOAT Field_Kind = 2 + // Field type int64. + Field_TYPE_INT64 Field_Kind = 3 + // Field type uint64. + Field_TYPE_UINT64 Field_Kind = 4 + // Field type int32. + Field_TYPE_INT32 Field_Kind = 5 + // Field type fixed64. + Field_TYPE_FIXED64 Field_Kind = 6 + // Field type fixed32. + Field_TYPE_FIXED32 Field_Kind = 7 + // Field type bool. + Field_TYPE_BOOL Field_Kind = 8 + // Field type string. + Field_TYPE_STRING Field_Kind = 9 + // Field type group. Proto2 syntax only, and deprecated. + Field_TYPE_GROUP Field_Kind = 10 + // Field type message. + Field_TYPE_MESSAGE Field_Kind = 11 + // Field type bytes. + Field_TYPE_BYTES Field_Kind = 12 + // Field type uint32. + Field_TYPE_UINT32 Field_Kind = 13 + // Field type enum. + Field_TYPE_ENUM Field_Kind = 14 + // Field type sfixed32. + Field_TYPE_SFIXED32 Field_Kind = 15 + // Field type sfixed64. + Field_TYPE_SFIXED64 Field_Kind = 16 + // Field type sint32. + Field_TYPE_SINT32 Field_Kind = 17 + // Field type sint64. + Field_TYPE_SINT64 Field_Kind = 18 +) + +var Field_Kind_name = map[int32]string{ + 0: "TYPE_UNKNOWN", + 1: "TYPE_DOUBLE", + 2: "TYPE_FLOAT", + 3: "TYPE_INT64", + 4: "TYPE_UINT64", + 5: "TYPE_INT32", + 6: "TYPE_FIXED64", + 7: "TYPE_FIXED32", + 8: "TYPE_BOOL", + 9: "TYPE_STRING", + 10: "TYPE_GROUP", + 11: "TYPE_MESSAGE", + 12: "TYPE_BYTES", + 13: "TYPE_UINT32", + 14: "TYPE_ENUM", + 15: "TYPE_SFIXED32", + 16: "TYPE_SFIXED64", + 17: "TYPE_SINT32", + 18: "TYPE_SINT64", +} + +var Field_Kind_value = map[string]int32{ + "TYPE_UNKNOWN": 0, + "TYPE_DOUBLE": 1, + "TYPE_FLOAT": 2, + "TYPE_INT64": 3, + "TYPE_UINT64": 4, + "TYPE_INT32": 5, + "TYPE_FIXED64": 6, + "TYPE_FIXED32": 7, + "TYPE_BOOL": 8, + "TYPE_STRING": 9, + "TYPE_GROUP": 10, + "TYPE_MESSAGE": 11, + "TYPE_BYTES": 12, + "TYPE_UINT32": 13, + "TYPE_ENUM": 14, + "TYPE_SFIXED32": 15, + "TYPE_SFIXED64": 16, + "TYPE_SINT32": 17, + "TYPE_SINT64": 18, +} + +func (Field_Kind) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_dd271cc1e348c538, []int{1, 0} +} + +// Whether a field is optional, required, or repeated. +type Field_Cardinality int32 + +const ( + // For fields with unknown cardinality. + Field_CARDINALITY_UNKNOWN Field_Cardinality = 0 + // For optional fields. + Field_CARDINALITY_OPTIONAL Field_Cardinality = 1 + // For required fields. Proto2 syntax only. + Field_CARDINALITY_REQUIRED Field_Cardinality = 2 + // For repeated fields. 
+ Field_CARDINALITY_REPEATED Field_Cardinality = 3 +) + +var Field_Cardinality_name = map[int32]string{ + 0: "CARDINALITY_UNKNOWN", + 1: "CARDINALITY_OPTIONAL", + 2: "CARDINALITY_REQUIRED", + 3: "CARDINALITY_REPEATED", +} + +var Field_Cardinality_value = map[string]int32{ + "CARDINALITY_UNKNOWN": 0, + "CARDINALITY_OPTIONAL": 1, + "CARDINALITY_REQUIRED": 2, + "CARDINALITY_REPEATED": 3, +} + +func (Field_Cardinality) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_dd271cc1e348c538, []int{1, 1} +} + +// A protocol buffer message type. +type Type struct { + // The fully qualified message name. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The list of fields. + Fields []*Field `protobuf:"bytes,2,rep,name=fields,proto3" json:"fields,omitempty"` + // The list of types appearing in `oneof` definitions in this type. + Oneofs []string `protobuf:"bytes,3,rep,name=oneofs,proto3" json:"oneofs,omitempty"` + // The protocol buffer options. + Options []*Option `protobuf:"bytes,4,rep,name=options,proto3" json:"options,omitempty"` + // The source context. + SourceContext *SourceContext `protobuf:"bytes,5,opt,name=source_context,json=sourceContext,proto3" json:"source_context,omitempty"` + // The source syntax. + Syntax Syntax `protobuf:"varint,6,opt,name=syntax,proto3,enum=google.protobuf.Syntax" json:"syntax,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Type) Reset() { *m = Type{} } +func (*Type) ProtoMessage() {} +func (*Type) Descriptor() ([]byte, []int) { + return fileDescriptor_dd271cc1e348c538, []int{0} +} +func (m *Type) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Type) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Type.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Type) XXX_Merge(src proto.Message) { + xxx_messageInfo_Type.Merge(m, src) +} +func (m *Type) XXX_Size() int { + return m.Size() +} +func (m *Type) XXX_DiscardUnknown() { + xxx_messageInfo_Type.DiscardUnknown(m) +} + +var xxx_messageInfo_Type proto.InternalMessageInfo + +func (m *Type) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Type) GetFields() []*Field { + if m != nil { + return m.Fields + } + return nil +} + +func (m *Type) GetOneofs() []string { + if m != nil { + return m.Oneofs + } + return nil +} + +func (m *Type) GetOptions() []*Option { + if m != nil { + return m.Options + } + return nil +} + +func (m *Type) GetSourceContext() *SourceContext { + if m != nil { + return m.SourceContext + } + return nil +} + +func (m *Type) GetSyntax() Syntax { + if m != nil { + return m.Syntax + } + return Syntax_SYNTAX_PROTO2 +} + +func (*Type) XXX_MessageName() string { + return "google.protobuf.Type" +} + +// A single field of a message type. +type Field struct { + // The field type. + Kind Field_Kind `protobuf:"varint,1,opt,name=kind,proto3,enum=google.protobuf.Field_Kind" json:"kind,omitempty"` + // The field cardinality. + Cardinality Field_Cardinality `protobuf:"varint,2,opt,name=cardinality,proto3,enum=google.protobuf.Field_Cardinality" json:"cardinality,omitempty"` + // The field number. + Number int32 `protobuf:"varint,3,opt,name=number,proto3" json:"number,omitempty"` + // The field name. 
+ Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + // The field type URL, without the scheme, for message or enumeration + // types. Example: `"type.googleapis.com/google.protobuf.Timestamp"`. + TypeUrl string `protobuf:"bytes,6,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` + // The index of the field type in `Type.oneofs`, for message or enumeration + // types. The first type has index 1; zero means the type is not in the list. + OneofIndex int32 `protobuf:"varint,7,opt,name=oneof_index,json=oneofIndex,proto3" json:"oneof_index,omitempty"` + // Whether to use alternative packed wire representation. + Packed bool `protobuf:"varint,8,opt,name=packed,proto3" json:"packed,omitempty"` + // The protocol buffer options. + Options []*Option `protobuf:"bytes,9,rep,name=options,proto3" json:"options,omitempty"` + // The field JSON name. + JsonName string `protobuf:"bytes,10,opt,name=json_name,json=jsonName,proto3" json:"json_name,omitempty"` + // The string value of the default value of this field. Proto2 syntax only. + DefaultValue string `protobuf:"bytes,11,opt,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Field) Reset() { *m = Field{} } +func (*Field) ProtoMessage() {} +func (*Field) Descriptor() ([]byte, []int) { + return fileDescriptor_dd271cc1e348c538, []int{1} +} +func (m *Field) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Field) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Field.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Field) XXX_Merge(src proto.Message) { + xxx_messageInfo_Field.Merge(m, src) +} +func (m *Field) XXX_Size() int { + return m.Size() +} +func (m *Field) XXX_DiscardUnknown() { + xxx_messageInfo_Field.DiscardUnknown(m) +} + +var xxx_messageInfo_Field proto.InternalMessageInfo + +func (m *Field) GetKind() Field_Kind { + if m != nil { + return m.Kind + } + return Field_TYPE_UNKNOWN +} + +func (m *Field) GetCardinality() Field_Cardinality { + if m != nil { + return m.Cardinality + } + return Field_CARDINALITY_UNKNOWN +} + +func (m *Field) GetNumber() int32 { + if m != nil { + return m.Number + } + return 0 +} + +func (m *Field) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Field) GetTypeUrl() string { + if m != nil { + return m.TypeUrl + } + return "" +} + +func (m *Field) GetOneofIndex() int32 { + if m != nil { + return m.OneofIndex + } + return 0 +} + +func (m *Field) GetPacked() bool { + if m != nil { + return m.Packed + } + return false +} + +func (m *Field) GetOptions() []*Option { + if m != nil { + return m.Options + } + return nil +} + +func (m *Field) GetJsonName() string { + if m != nil { + return m.JsonName + } + return "" +} + +func (m *Field) GetDefaultValue() string { + if m != nil { + return m.DefaultValue + } + return "" +} + +func (*Field) XXX_MessageName() string { + return "google.protobuf.Field" +} + +// Enum type definition. +type Enum struct { + // Enum type name. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Enum value definitions. + Enumvalue []*EnumValue `protobuf:"bytes,2,rep,name=enumvalue,proto3" json:"enumvalue,omitempty"` + // Protocol buffer options. 
+ Options []*Option `protobuf:"bytes,3,rep,name=options,proto3" json:"options,omitempty"` + // The source context. + SourceContext *SourceContext `protobuf:"bytes,4,opt,name=source_context,json=sourceContext,proto3" json:"source_context,omitempty"` + // The source syntax. + Syntax Syntax `protobuf:"varint,5,opt,name=syntax,proto3,enum=google.protobuf.Syntax" json:"syntax,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Enum) Reset() { *m = Enum{} } +func (*Enum) ProtoMessage() {} +func (*Enum) Descriptor() ([]byte, []int) { + return fileDescriptor_dd271cc1e348c538, []int{2} +} +func (m *Enum) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Enum) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Enum.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Enum) XXX_Merge(src proto.Message) { + xxx_messageInfo_Enum.Merge(m, src) +} +func (m *Enum) XXX_Size() int { + return m.Size() +} +func (m *Enum) XXX_DiscardUnknown() { + xxx_messageInfo_Enum.DiscardUnknown(m) +} + +var xxx_messageInfo_Enum proto.InternalMessageInfo + +func (m *Enum) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Enum) GetEnumvalue() []*EnumValue { + if m != nil { + return m.Enumvalue + } + return nil +} + +func (m *Enum) GetOptions() []*Option { + if m != nil { + return m.Options + } + return nil +} + +func (m *Enum) GetSourceContext() *SourceContext { + if m != nil { + return m.SourceContext + } + return nil +} + +func (m *Enum) GetSyntax() Syntax { + if m != nil { + return m.Syntax + } + return Syntax_SYNTAX_PROTO2 +} + +func (*Enum) XXX_MessageName() string { + return "google.protobuf.Enum" +} + +// Enum value definition. +type EnumValue struct { + // Enum value name. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Enum value number. + Number int32 `protobuf:"varint,2,opt,name=number,proto3" json:"number,omitempty"` + // Protocol buffer options. 
+ Options []*Option `protobuf:"bytes,3,rep,name=options,proto3" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumValue) Reset() { *m = EnumValue{} } +func (*EnumValue) ProtoMessage() {} +func (*EnumValue) Descriptor() ([]byte, []int) { + return fileDescriptor_dd271cc1e348c538, []int{3} +} +func (m *EnumValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EnumValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EnumValue.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EnumValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumValue.Merge(m, src) +} +func (m *EnumValue) XXX_Size() int { + return m.Size() +} +func (m *EnumValue) XXX_DiscardUnknown() { + xxx_messageInfo_EnumValue.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumValue proto.InternalMessageInfo + +func (m *EnumValue) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *EnumValue) GetNumber() int32 { + if m != nil { + return m.Number + } + return 0 +} + +func (m *EnumValue) GetOptions() []*Option { + if m != nil { + return m.Options + } + return nil +} + +func (*EnumValue) XXX_MessageName() string { + return "google.protobuf.EnumValue" +} + +// A protocol buffer option, which can be attached to a message, field, +// enumeration, etc. +type Option struct { + // The option's name. For protobuf built-in options (options defined in + // descriptor.proto), this is the short name. For example, `"map_entry"`. + // For custom options, it should be the fully-qualified name. For example, + // `"google.api.http"`. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The option's value packed in an Any message. If the value is a primitive, + // the corresponding wrapper type defined in google/protobuf/wrappers.proto + // should be used. If the value is an enum, it should be stored as an int32 + // value using the google.protobuf.Int32Value type. 
+ Value *Any `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Option) Reset() { *m = Option{} } +func (*Option) ProtoMessage() {} +func (*Option) Descriptor() ([]byte, []int) { + return fileDescriptor_dd271cc1e348c538, []int{4} +} +func (m *Option) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Option) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Option.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Option) XXX_Merge(src proto.Message) { + xxx_messageInfo_Option.Merge(m, src) +} +func (m *Option) XXX_Size() int { + return m.Size() +} +func (m *Option) XXX_DiscardUnknown() { + xxx_messageInfo_Option.DiscardUnknown(m) +} + +var xxx_messageInfo_Option proto.InternalMessageInfo + +func (m *Option) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Option) GetValue() *Any { + if m != nil { + return m.Value + } + return nil +} + +func (*Option) XXX_MessageName() string { + return "google.protobuf.Option" +} +func init() { + proto.RegisterEnum("google.protobuf.Syntax", Syntax_name, Syntax_value) + proto.RegisterEnum("google.protobuf.Field_Kind", Field_Kind_name, Field_Kind_value) + proto.RegisterEnum("google.protobuf.Field_Cardinality", Field_Cardinality_name, Field_Cardinality_value) + proto.RegisterType((*Type)(nil), "google.protobuf.Type") + proto.RegisterType((*Field)(nil), "google.protobuf.Field") + proto.RegisterType((*Enum)(nil), "google.protobuf.Enum") + proto.RegisterType((*EnumValue)(nil), "google.protobuf.EnumValue") + proto.RegisterType((*Option)(nil), "google.protobuf.Option") +} + +func init() { proto.RegisterFile("google/protobuf/type.proto", fileDescriptor_dd271cc1e348c538) } + +var fileDescriptor_dd271cc1e348c538 = []byte{ + // 840 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0xcf, 0x73, 0xda, 0x46, + 0x14, 0xf6, 0x0a, 0x21, 0xa3, 0x87, 0xc1, 0x9b, 0x4d, 0x26, 0x51, 0x9c, 0x19, 0x95, 0xa1, 0x3d, + 0x30, 0x39, 0xe0, 0x29, 0x78, 0x3c, 0xbd, 0x82, 0x91, 0x29, 0x63, 0x22, 0xa9, 0x8b, 0x68, 0xe2, + 0x5e, 0x18, 0x0c, 0x72, 0x86, 0x44, 0xac, 0x18, 0x24, 0x5a, 0x73, 0xeb, 0x4c, 0xcf, 0xfd, 0x27, + 0x7a, 0xea, 0xf4, 0xdc, 0x3f, 0xc2, 0xc7, 0x1e, 0x7b, 0xac, 0xc9, 0xa5, 0xc7, 0x1c, 0x73, 0x6b, + 0x67, 0x57, 0x20, 0x8b, 0x1f, 0x9d, 0x49, 0xdb, 0x1b, 0xef, 0xfb, 0xbe, 0xf7, 0x73, 0x9f, 0x1e, + 0x70, 0xf4, 0xda, 0xf7, 0x5f, 0x7b, 0xee, 0xf1, 0x64, 0xea, 0x87, 0xfe, 0xd5, 0xec, 0xfa, 0x38, + 0x9c, 0x4f, 0xdc, 0xb2, 0xb0, 0xc8, 0x61, 0xc4, 0x95, 0x57, 0xdc, 0xd1, 0xd3, 0x4d, 0x71, 0x9f, + 0xcd, 0x23, 0xf6, 0xe8, 0xb3, 0x4d, 0x2a, 0xf0, 0x67, 0xd3, 0x81, 0xdb, 0x1b, 0xf8, 0x2c, 0x74, + 0x6f, 0xc2, 0x48, 0x55, 0xfc, 0x51, 0x02, 0xd9, 0x99, 0x4f, 0x5c, 0x42, 0x40, 0x66, 0xfd, 0xb1, + 0xab, 0xa1, 0x02, 0x2a, 0xa9, 0x54, 0xfc, 0x26, 0x65, 0x50, 0xae, 0x47, 0xae, 0x37, 0x0c, 0x34, + 0xa9, 0x90, 0x2a, 0x65, 0x2b, 0x8f, 0xcb, 0x1b, 0xf9, 0xcb, 0xe7, 0x9c, 0xa6, 0x4b, 0x15, 0x79, + 0x0c, 0x8a, 0xcf, 0x5c, 0xff, 0x3a, 0xd0, 0x52, 0x85, 0x54, 0x49, 0xa5, 0x4b, 0x8b, 0x7c, 0x0e, + 0xfb, 0xfe, 0x24, 0x1c, 0xf9, 0x2c, 0xd0, 0x64, 0x11, 0xe8, 0xc9, 0x56, 0x20, 0x4b, 0xf0, 0x74, + 0xa5, 0x23, 0x06, 0xe4, 0xd7, 0xeb, 0xd5, 0xd2, 0x05, 0x54, 0xca, 0x56, 0xf4, 0x2d, 
0xcf, 0x8e, + 0x90, 0x9d, 0x45, 0x2a, 0x9a, 0x0b, 0x92, 0x26, 0x39, 0x06, 0x25, 0x98, 0xb3, 0xb0, 0x7f, 0xa3, + 0x29, 0x05, 0x54, 0xca, 0xef, 0x48, 0xdc, 0x11, 0x34, 0x5d, 0xca, 0x8a, 0xbf, 0x2a, 0x90, 0x16, + 0x4d, 0x91, 0x63, 0x90, 0xdf, 0x8e, 0xd8, 0x50, 0x0c, 0x24, 0x5f, 0x79, 0xb6, 0xbb, 0xf5, 0xf2, + 0xc5, 0x88, 0x0d, 0xa9, 0x10, 0x92, 0x06, 0x64, 0x07, 0xfd, 0xe9, 0x70, 0xc4, 0xfa, 0xde, 0x28, + 0x9c, 0x6b, 0x92, 0xf0, 0x2b, 0xfe, 0x83, 0xdf, 0xd9, 0xbd, 0x92, 0x26, 0xdd, 0xf8, 0x0c, 0xd9, + 0x6c, 0x7c, 0xe5, 0x4e, 0xb5, 0x54, 0x01, 0x95, 0xd2, 0x74, 0x69, 0xc5, 0xef, 0x23, 0x27, 0xde, + 0xe7, 0x29, 0x64, 0xf8, 0x72, 0xf4, 0x66, 0x53, 0x4f, 0xf4, 0xa7, 0xd2, 0x7d, 0x6e, 0x77, 0xa7, + 0x1e, 0xf9, 0x04, 0xb2, 0x62, 0xf8, 0xbd, 0x11, 0x1b, 0xba, 0x37, 0xda, 0xbe, 0x88, 0x05, 0x02, + 0x6a, 0x71, 0x84, 0xe7, 0x99, 0xf4, 0x07, 0x6f, 0xdd, 0xa1, 0x96, 0x29, 0xa0, 0x52, 0x86, 0x2e, + 0xad, 0xe4, 0x5b, 0xa9, 0x1f, 0xf9, 0x56, 0xcf, 0x40, 0x7d, 0x13, 0xf8, 0xac, 0x27, 0xea, 0x03, + 0x51, 0x47, 0x86, 0x03, 0x26, 0xaf, 0xf1, 0x53, 0xc8, 0x0d, 0xdd, 0xeb, 0xfe, 0xcc, 0x0b, 0x7b, + 0xdf, 0xf6, 0xbd, 0x99, 0xab, 0x65, 0x85, 0xe0, 0x60, 0x09, 0x7e, 0xcd, 0xb1, 0xe2, 0xad, 0x04, + 0x32, 0x9f, 0x24, 0xc1, 0x70, 0xe0, 0x5c, 0xda, 0x46, 0xaf, 0x6b, 0x5e, 0x98, 0xd6, 0x4b, 0x13, + 0xef, 0x91, 0x43, 0xc8, 0x0a, 0xa4, 0x61, 0x75, 0xeb, 0x6d, 0x03, 0x23, 0x92, 0x07, 0x10, 0xc0, + 0x79, 0xdb, 0xaa, 0x39, 0x58, 0x8a, 0xed, 0x96, 0xe9, 0x9c, 0x9e, 0xe0, 0x54, 0xec, 0xd0, 0x8d, + 0x00, 0x39, 0x29, 0xa8, 0x56, 0x70, 0x3a, 0xce, 0x71, 0xde, 0x7a, 0x65, 0x34, 0x4e, 0x4f, 0xb0, + 0xb2, 0x8e, 0x54, 0x2b, 0x78, 0x9f, 0xe4, 0x40, 0x15, 0x48, 0xdd, 0xb2, 0xda, 0x38, 0x13, 0xc7, + 0xec, 0x38, 0xb4, 0x65, 0x36, 0xb1, 0x1a, 0xc7, 0x6c, 0x52, 0xab, 0x6b, 0x63, 0x88, 0x23, 0xbc, + 0x30, 0x3a, 0x9d, 0x5a, 0xd3, 0xc0, 0xd9, 0x58, 0x51, 0xbf, 0x74, 0x8c, 0x0e, 0x3e, 0x58, 0x2b, + 0xab, 0x5a, 0xc1, 0xb9, 0x38, 0x85, 0x61, 0x76, 0x5f, 0xe0, 0x3c, 0x79, 0x00, 0xb9, 0x28, 0xc5, + 0xaa, 0x88, 0xc3, 0x0d, 0xe8, 0xf4, 0x04, 0xe3, 0xfb, 0x42, 0xa2, 0x28, 0x0f, 0xd6, 0x80, 0xd3, + 0x13, 0x4c, 0x8a, 0x21, 0x64, 0x13, 0xbb, 0x45, 0x9e, 0xc0, 0xc3, 0xb3, 0x1a, 0x6d, 0xb4, 0xcc, + 0x5a, 0xbb, 0xe5, 0x5c, 0x26, 0xe6, 0xaa, 0xc1, 0xa3, 0x24, 0x61, 0xd9, 0x4e, 0xcb, 0x32, 0x6b, + 0x6d, 0x8c, 0x36, 0x19, 0x6a, 0x7c, 0xd5, 0x6d, 0x51, 0xa3, 0x81, 0xa5, 0x6d, 0xc6, 0x36, 0x6a, + 0x8e, 0xd1, 0xc0, 0xa9, 0xe2, 0x5f, 0x08, 0x64, 0x83, 0xcd, 0xc6, 0x3b, 0xcf, 0xc8, 0x17, 0xa0, + 0xba, 0x6c, 0x36, 0x8e, 0x9e, 0x3f, 0xba, 0x24, 0x47, 0x5b, 0x4b, 0xc5, 0xbd, 0xc5, 0x32, 0xd0, + 0x7b, 0x71, 0x72, 0x19, 0x53, 0xff, 0xf9, 0x70, 0xc8, 0xff, 0xef, 0x70, 0xa4, 0x3f, 0xee, 0x70, + 0xbc, 0x01, 0x35, 0x6e, 0x61, 0xe7, 0x14, 0xee, 0x3f, 0x6c, 0x69, 0xed, 0xc3, 0xfe, 0xf7, 0x3d, + 0x16, 0xbf, 0x04, 0x25, 0x82, 0x76, 0x26, 0x7a, 0x0e, 0xe9, 0xd5, 0xa8, 0x79, 0xe3, 0x8f, 0xb6, + 0xc2, 0xd5, 0xd8, 0x9c, 0x46, 0x92, 0xe7, 0x65, 0x50, 0xa2, 0x3e, 0xf8, 0xb2, 0x75, 0x2e, 0x4d, + 0xa7, 0xf6, 0xaa, 0x67, 0x53, 0xcb, 0xb1, 0x2a, 0x78, 0x6f, 0x13, 0xaa, 0x62, 0x54, 0xff, 0x01, + 0xfd, 0x7e, 0xa7, 0xef, 0xbd, 0xbf, 0xd3, 0xd1, 0x87, 0x3b, 0x1d, 0x7d, 0xbf, 0xd0, 0xd1, 0xcf, + 0x0b, 0x1d, 0xdd, 0x2e, 0x74, 0xf4, 0xdb, 0x42, 0x47, 0x7f, 0x2c, 0x74, 0xf4, 0xe7, 0x42, 0xdf, + 0x7b, 0xcf, 0xf1, 0x77, 0x3a, 0xba, 0x7d, 0xa7, 0x23, 0x78, 0x38, 0xf0, 0xc7, 0x9b, 0x25, 0xd4, + 0x55, 0xfe, 0x9f, 0x63, 0x73, 0xcb, 0x46, 0xdf, 0xa4, 0xf9, 0xd1, 0x0a, 0x3e, 0x20, 0xf4, 0x93, + 0x94, 0x6a, 0xda, 0xf5, 0x5f, 0x24, 0xbd, 0x19, 0xc9, 0xed, 0x55, 0xc5, 0x2f, 0x5d, 0xcf, 0xbb, + 0x60, 0xfe, 
0x77, 0x8c, 0xbb, 0x05, 0x57, 0x8a, 0x88, 0x53, 0xfd, 0x3b, 0x00, 0x00, 0xff, 0xff, + 0xbc, 0x2a, 0x5e, 0x82, 0x2b, 0x07, 0x00, 0x00, +} + +func (this *Type) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Type) + if !ok { + that2, ok := that.(Type) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Name != that1.Name { + if this.Name < that1.Name { + return -1 + } + return 1 + } + if len(this.Fields) != len(that1.Fields) { + if len(this.Fields) < len(that1.Fields) { + return -1 + } + return 1 + } + for i := range this.Fields { + if c := this.Fields[i].Compare(that1.Fields[i]); c != 0 { + return c + } + } + if len(this.Oneofs) != len(that1.Oneofs) { + if len(this.Oneofs) < len(that1.Oneofs) { + return -1 + } + return 1 + } + for i := range this.Oneofs { + if this.Oneofs[i] != that1.Oneofs[i] { + if this.Oneofs[i] < that1.Oneofs[i] { + return -1 + } + return 1 + } + } + if len(this.Options) != len(that1.Options) { + if len(this.Options) < len(that1.Options) { + return -1 + } + return 1 + } + for i := range this.Options { + if c := this.Options[i].Compare(that1.Options[i]); c != 0 { + return c + } + } + if c := this.SourceContext.Compare(that1.SourceContext); c != 0 { + return c + } + if this.Syntax != that1.Syntax { + if this.Syntax < that1.Syntax { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Field) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Field) + if !ok { + that2, ok := that.(Field) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Kind != that1.Kind { + if this.Kind < that1.Kind { + return -1 + } + return 1 + } + if this.Cardinality != that1.Cardinality { + if this.Cardinality < that1.Cardinality { + return -1 + } + return 1 + } + if this.Number != that1.Number { + if this.Number < that1.Number { + return -1 + } + return 1 + } + if this.Name != that1.Name { + if this.Name < that1.Name { + return -1 + } + return 1 + } + if this.TypeUrl != that1.TypeUrl { + if this.TypeUrl < that1.TypeUrl { + return -1 + } + return 1 + } + if this.OneofIndex != that1.OneofIndex { + if this.OneofIndex < that1.OneofIndex { + return -1 + } + return 1 + } + if this.Packed != that1.Packed { + if !this.Packed { + return -1 + } + return 1 + } + if len(this.Options) != len(that1.Options) { + if len(this.Options) < len(that1.Options) { + return -1 + } + return 1 + } + for i := range this.Options { + if c := this.Options[i].Compare(that1.Options[i]); c != 0 { + return c + } + } + if this.JsonName != that1.JsonName { + if this.JsonName < that1.JsonName { + return -1 + } + return 1 + } + if this.DefaultValue != that1.DefaultValue { + if this.DefaultValue < that1.DefaultValue { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Enum) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Enum) + if !ok { + that2, ok := that.(Enum) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + 
return 1 + } else if this == nil { + return -1 + } + if this.Name != that1.Name { + if this.Name < that1.Name { + return -1 + } + return 1 + } + if len(this.Enumvalue) != len(that1.Enumvalue) { + if len(this.Enumvalue) < len(that1.Enumvalue) { + return -1 + } + return 1 + } + for i := range this.Enumvalue { + if c := this.Enumvalue[i].Compare(that1.Enumvalue[i]); c != 0 { + return c + } + } + if len(this.Options) != len(that1.Options) { + if len(this.Options) < len(that1.Options) { + return -1 + } + return 1 + } + for i := range this.Options { + if c := this.Options[i].Compare(that1.Options[i]); c != 0 { + return c + } + } + if c := this.SourceContext.Compare(that1.SourceContext); c != 0 { + return c + } + if this.Syntax != that1.Syntax { + if this.Syntax < that1.Syntax { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *EnumValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*EnumValue) + if !ok { + that2, ok := that.(EnumValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Name != that1.Name { + if this.Name < that1.Name { + return -1 + } + return 1 + } + if this.Number != that1.Number { + if this.Number < that1.Number { + return -1 + } + return 1 + } + if len(this.Options) != len(that1.Options) { + if len(this.Options) < len(that1.Options) { + return -1 + } + return 1 + } + for i := range this.Options { + if c := this.Options[i].Compare(that1.Options[i]); c != 0 { + return c + } + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Option) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Option) + if !ok { + that2, ok := that.(Option) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Name != that1.Name { + if this.Name < that1.Name { + return -1 + } + return 1 + } + if c := this.Value.Compare(that1.Value); c != 0 { + return c + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (x Syntax) String() string { + s, ok := Syntax_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (x Field_Kind) String() string { + s, ok := Field_Kind_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (x Field_Cardinality) String() string { + s, ok := Field_Cardinality_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (this *Type) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Type) + if !ok { + that2, ok := that.(Type) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if len(this.Fields) != len(that1.Fields) { + return false + } + for i := range this.Fields { + if !this.Fields[i].Equal(that1.Fields[i]) { + return false + } + } + if len(this.Oneofs) != len(that1.Oneofs) { + return false + } + for i := range this.Oneofs { + if this.Oneofs[i] != that1.Oneofs[i] { + return false + } + } + if 
len(this.Options) != len(that1.Options) { + return false + } + for i := range this.Options { + if !this.Options[i].Equal(that1.Options[i]) { + return false + } + } + if !this.SourceContext.Equal(that1.SourceContext) { + return false + } + if this.Syntax != that1.Syntax { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Field) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Field) + if !ok { + that2, ok := that.(Field) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Kind != that1.Kind { + return false + } + if this.Cardinality != that1.Cardinality { + return false + } + if this.Number != that1.Number { + return false + } + if this.Name != that1.Name { + return false + } + if this.TypeUrl != that1.TypeUrl { + return false + } + if this.OneofIndex != that1.OneofIndex { + return false + } + if this.Packed != that1.Packed { + return false + } + if len(this.Options) != len(that1.Options) { + return false + } + for i := range this.Options { + if !this.Options[i].Equal(that1.Options[i]) { + return false + } + } + if this.JsonName != that1.JsonName { + return false + } + if this.DefaultValue != that1.DefaultValue { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Enum) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Enum) + if !ok { + that2, ok := that.(Enum) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if len(this.Enumvalue) != len(that1.Enumvalue) { + return false + } + for i := range this.Enumvalue { + if !this.Enumvalue[i].Equal(that1.Enumvalue[i]) { + return false + } + } + if len(this.Options) != len(that1.Options) { + return false + } + for i := range this.Options { + if !this.Options[i].Equal(that1.Options[i]) { + return false + } + } + if !this.SourceContext.Equal(that1.SourceContext) { + return false + } + if this.Syntax != that1.Syntax { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *EnumValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*EnumValue) + if !ok { + that2, ok := that.(EnumValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if this.Number != that1.Number { + return false + } + if len(this.Options) != len(that1.Options) { + return false + } + for i := range this.Options { + if !this.Options[i].Equal(that1.Options[i]) { + return false + } + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Option) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Option) + if !ok { + that2, ok := that.(Option) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if !this.Value.Equal(that1.Value) { + return false + } + 
if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Type) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 10) + s = append(s, "&types.Type{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + if this.Fields != nil { + s = append(s, "Fields: "+fmt.Sprintf("%#v", this.Fields)+",\n") + } + s = append(s, "Oneofs: "+fmt.Sprintf("%#v", this.Oneofs)+",\n") + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.SourceContext != nil { + s = append(s, "SourceContext: "+fmt.Sprintf("%#v", this.SourceContext)+",\n") + } + s = append(s, "Syntax: "+fmt.Sprintf("%#v", this.Syntax)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Field) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 14) + s = append(s, "&types.Field{") + s = append(s, "Kind: "+fmt.Sprintf("%#v", this.Kind)+",\n") + s = append(s, "Cardinality: "+fmt.Sprintf("%#v", this.Cardinality)+",\n") + s = append(s, "Number: "+fmt.Sprintf("%#v", this.Number)+",\n") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + s = append(s, "TypeUrl: "+fmt.Sprintf("%#v", this.TypeUrl)+",\n") + s = append(s, "OneofIndex: "+fmt.Sprintf("%#v", this.OneofIndex)+",\n") + s = append(s, "Packed: "+fmt.Sprintf("%#v", this.Packed)+",\n") + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + s = append(s, "JsonName: "+fmt.Sprintf("%#v", this.JsonName)+",\n") + s = append(s, "DefaultValue: "+fmt.Sprintf("%#v", this.DefaultValue)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Enum) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&types.Enum{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + if this.Enumvalue != nil { + s = append(s, "Enumvalue: "+fmt.Sprintf("%#v", this.Enumvalue)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.SourceContext != nil { + s = append(s, "SourceContext: "+fmt.Sprintf("%#v", this.SourceContext)+",\n") + } + s = append(s, "Syntax: "+fmt.Sprintf("%#v", this.Syntax)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumValue) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&types.EnumValue{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + s = append(s, "Number: "+fmt.Sprintf("%#v", this.Number)+",\n") + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Option) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&types.Option{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + if this.Value != nil { + s = append(s, "Value: "+fmt.Sprintf("%#v", 
this.Value)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringType(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Type) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Type) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Type) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Syntax != 0 { + i = encodeVarintType(dAtA, i, uint64(m.Syntax)) + i-- + dAtA[i] = 0x30 + } + if m.SourceContext != nil { + { + size, err := m.SourceContext.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintType(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if len(m.Options) > 0 { + for iNdEx := len(m.Options) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Options[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintType(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.Oneofs) > 0 { + for iNdEx := len(m.Oneofs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Oneofs[iNdEx]) + copy(dAtA[i:], m.Oneofs[iNdEx]) + i = encodeVarintType(dAtA, i, uint64(len(m.Oneofs[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Fields) > 0 { + for iNdEx := len(m.Fields) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Fields[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintType(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintType(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Field) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Field) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Field) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.DefaultValue) > 0 { + i -= len(m.DefaultValue) + copy(dAtA[i:], m.DefaultValue) + i = encodeVarintType(dAtA, i, uint64(len(m.DefaultValue))) + i-- + dAtA[i] = 0x5a + } + if len(m.JsonName) > 0 { + i -= len(m.JsonName) + copy(dAtA[i:], m.JsonName) + i = encodeVarintType(dAtA, i, uint64(len(m.JsonName))) + i-- + dAtA[i] = 0x52 + } + if len(m.Options) > 0 { + for iNdEx := len(m.Options) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Options[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintType(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + } + if m.Packed { + i-- + if m.Packed { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + 
} + i-- + dAtA[i] = 0x40 + } + if m.OneofIndex != 0 { + i = encodeVarintType(dAtA, i, uint64(m.OneofIndex)) + i-- + dAtA[i] = 0x38 + } + if len(m.TypeUrl) > 0 { + i -= len(m.TypeUrl) + copy(dAtA[i:], m.TypeUrl) + i = encodeVarintType(dAtA, i, uint64(len(m.TypeUrl))) + i-- + dAtA[i] = 0x32 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintType(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x22 + } + if m.Number != 0 { + i = encodeVarintType(dAtA, i, uint64(m.Number)) + i-- + dAtA[i] = 0x18 + } + if m.Cardinality != 0 { + i = encodeVarintType(dAtA, i, uint64(m.Cardinality)) + i-- + dAtA[i] = 0x10 + } + if m.Kind != 0 { + i = encodeVarintType(dAtA, i, uint64(m.Kind)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Enum) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Enum) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Enum) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Syntax != 0 { + i = encodeVarintType(dAtA, i, uint64(m.Syntax)) + i-- + dAtA[i] = 0x28 + } + if m.SourceContext != nil { + { + size, err := m.SourceContext.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintType(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if len(m.Options) > 0 { + for iNdEx := len(m.Options) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Options[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintType(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Enumvalue) > 0 { + for iNdEx := len(m.Enumvalue) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Enumvalue[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintType(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintType(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EnumValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EnumValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EnumValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Options) > 0 { + for iNdEx := len(m.Options) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Options[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintType(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.Number != 0 { + i = encodeVarintType(dAtA, i, uint64(m.Number)) + i-- + dAtA[i] = 0x10 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintType(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Option) Marshal() 
(dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Option) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Option) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Value != nil { + { + size, err := m.Value.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintType(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintType(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintType(dAtA []byte, offset int, v uint64) int { + offset -= sovType(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func NewPopulatedType(r randyType, easy bool) *Type { + this := &Type{} + this.Name = string(randStringType(r)) + if r.Intn(5) != 0 { + v1 := r.Intn(5) + this.Fields = make([]*Field, v1) + for i := 0; i < v1; i++ { + this.Fields[i] = NewPopulatedField(r, easy) + } + } + v2 := r.Intn(10) + this.Oneofs = make([]string, v2) + for i := 0; i < v2; i++ { + this.Oneofs[i] = string(randStringType(r)) + } + if r.Intn(5) != 0 { + v3 := r.Intn(5) + this.Options = make([]*Option, v3) + for i := 0; i < v3; i++ { + this.Options[i] = NewPopulatedOption(r, easy) + } + } + if r.Intn(5) != 0 { + this.SourceContext = NewPopulatedSourceContext(r, easy) + } + this.Syntax = Syntax([]int32{0, 1}[r.Intn(2)]) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedType(r, 7) + } + return this +} + +func NewPopulatedField(r randyType, easy bool) *Field { + this := &Field{} + this.Kind = Field_Kind([]int32{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18}[r.Intn(19)]) + this.Cardinality = Field_Cardinality([]int32{0, 1, 2, 3}[r.Intn(4)]) + this.Number = int32(r.Int31()) + if r.Intn(2) == 0 { + this.Number *= -1 + } + this.Name = string(randStringType(r)) + this.TypeUrl = string(randStringType(r)) + this.OneofIndex = int32(r.Int31()) + if r.Intn(2) == 0 { + this.OneofIndex *= -1 + } + this.Packed = bool(bool(r.Intn(2) == 0)) + if r.Intn(5) != 0 { + v4 := r.Intn(5) + this.Options = make([]*Option, v4) + for i := 0; i < v4; i++ { + this.Options[i] = NewPopulatedOption(r, easy) + } + } + this.JsonName = string(randStringType(r)) + this.DefaultValue = string(randStringType(r)) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedType(r, 12) + } + return this +} + +func NewPopulatedEnum(r randyType, easy bool) *Enum { + this := &Enum{} + this.Name = string(randStringType(r)) + if r.Intn(5) != 0 { + v5 := r.Intn(5) + this.Enumvalue = make([]*EnumValue, v5) + for i := 0; i < v5; i++ { + this.Enumvalue[i] = NewPopulatedEnumValue(r, easy) + } + } + if r.Intn(5) != 0 { + v6 := r.Intn(5) + this.Options = make([]*Option, v6) + for i := 0; i < v6; i++ { + this.Options[i] = NewPopulatedOption(r, easy) + } + } + if r.Intn(5) != 0 { + this.SourceContext = NewPopulatedSourceContext(r, easy) + } + this.Syntax = Syntax([]int32{0, 1}[r.Intn(2)]) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedType(r, 6) + } + return this +} + +func 
NewPopulatedEnumValue(r randyType, easy bool) *EnumValue { + this := &EnumValue{} + this.Name = string(randStringType(r)) + this.Number = int32(r.Int31()) + if r.Intn(2) == 0 { + this.Number *= -1 + } + if r.Intn(5) != 0 { + v7 := r.Intn(5) + this.Options = make([]*Option, v7) + for i := 0; i < v7; i++ { + this.Options[i] = NewPopulatedOption(r, easy) + } + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedType(r, 4) + } + return this +} + +func NewPopulatedOption(r randyType, easy bool) *Option { + this := &Option{} + this.Name = string(randStringType(r)) + if r.Intn(5) != 0 { + this.Value = NewPopulatedAny(r, easy) + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedType(r, 3) + } + return this +} + +type randyType interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneType(r randyType) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringType(r randyType) string { + v8 := r.Intn(100) + tmps := make([]rune, v8) + for i := 0; i < v8; i++ { + tmps[i] = randUTF8RuneType(r) + } + return string(tmps) +} +func randUnrecognizedType(r randyType, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldType(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldType(dAtA []byte, r randyType, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = encodeVarintPopulateType(dAtA, uint64(key)) + v9 := r.Int63() + if r.Intn(2) == 0 { + v9 *= -1 + } + dAtA = encodeVarintPopulateType(dAtA, uint64(v9)) + case 1: + dAtA = encodeVarintPopulateType(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateType(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateType(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateType(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateType(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *Type) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovType(uint64(l)) + } + if len(m.Fields) > 0 { + for _, e := range m.Fields { + l = e.Size() + n += 1 + l + sovType(uint64(l)) + } + } + if len(m.Oneofs) > 0 { + for _, s := range m.Oneofs { + l = len(s) + n += 1 + l + sovType(uint64(l)) + } + } + if len(m.Options) > 0 { + for _, e := range m.Options { + l = e.Size() + n += 1 + l + sovType(uint64(l)) + } + } + if m.SourceContext != nil { + l = m.SourceContext.Size() + n += 1 + l + sovType(uint64(l)) + } + if m.Syntax != 0 { + n += 1 + sovType(uint64(m.Syntax)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Field) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Kind != 0 { + n += 1 + 
sovType(uint64(m.Kind)) + } + if m.Cardinality != 0 { + n += 1 + sovType(uint64(m.Cardinality)) + } + if m.Number != 0 { + n += 1 + sovType(uint64(m.Number)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovType(uint64(l)) + } + l = len(m.TypeUrl) + if l > 0 { + n += 1 + l + sovType(uint64(l)) + } + if m.OneofIndex != 0 { + n += 1 + sovType(uint64(m.OneofIndex)) + } + if m.Packed { + n += 2 + } + if len(m.Options) > 0 { + for _, e := range m.Options { + l = e.Size() + n += 1 + l + sovType(uint64(l)) + } + } + l = len(m.JsonName) + if l > 0 { + n += 1 + l + sovType(uint64(l)) + } + l = len(m.DefaultValue) + if l > 0 { + n += 1 + l + sovType(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Enum) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovType(uint64(l)) + } + if len(m.Enumvalue) > 0 { + for _, e := range m.Enumvalue { + l = e.Size() + n += 1 + l + sovType(uint64(l)) + } + } + if len(m.Options) > 0 { + for _, e := range m.Options { + l = e.Size() + n += 1 + l + sovType(uint64(l)) + } + } + if m.SourceContext != nil { + l = m.SourceContext.Size() + n += 1 + l + sovType(uint64(l)) + } + if m.Syntax != 0 { + n += 1 + sovType(uint64(m.Syntax)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *EnumValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovType(uint64(l)) + } + if m.Number != 0 { + n += 1 + sovType(uint64(m.Number)) + } + if len(m.Options) > 0 { + for _, e := range m.Options { + l = e.Size() + n += 1 + l + sovType(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Option) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovType(uint64(l)) + } + if m.Value != nil { + l = m.Value.Size() + n += 1 + l + sovType(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovType(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozType(x uint64) (n int) { + return sovType(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Type) String() string { + if this == nil { + return "nil" + } + repeatedStringForFields := "[]*Field{" + for _, f := range this.Fields { + repeatedStringForFields += strings.Replace(f.String(), "Field", "Field", 1) + "," + } + repeatedStringForFields += "}" + repeatedStringForOptions := "[]*Option{" + for _, f := range this.Options { + repeatedStringForOptions += strings.Replace(f.String(), "Option", "Option", 1) + "," + } + repeatedStringForOptions += "}" + s := strings.Join([]string{`&Type{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Fields:` + repeatedStringForFields + `,`, + `Oneofs:` + fmt.Sprintf("%v", this.Oneofs) + `,`, + `Options:` + repeatedStringForOptions + `,`, + `SourceContext:` + strings.Replace(fmt.Sprintf("%v", this.SourceContext), "SourceContext", "SourceContext", 1) + `,`, + `Syntax:` + fmt.Sprintf("%v", this.Syntax) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *Field) String() string { + if this == nil { + return "nil" + } + repeatedStringForOptions := "[]*Option{" + for _, f := range this.Options { + repeatedStringForOptions += strings.Replace(f.String(), "Option", "Option", 1) + "," + } + repeatedStringForOptions += "}" 
+ s := strings.Join([]string{`&Field{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Cardinality:` + fmt.Sprintf("%v", this.Cardinality) + `,`, + `Number:` + fmt.Sprintf("%v", this.Number) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `TypeUrl:` + fmt.Sprintf("%v", this.TypeUrl) + `,`, + `OneofIndex:` + fmt.Sprintf("%v", this.OneofIndex) + `,`, + `Packed:` + fmt.Sprintf("%v", this.Packed) + `,`, + `Options:` + repeatedStringForOptions + `,`, + `JsonName:` + fmt.Sprintf("%v", this.JsonName) + `,`, + `DefaultValue:` + fmt.Sprintf("%v", this.DefaultValue) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *Enum) String() string { + if this == nil { + return "nil" + } + repeatedStringForEnumvalue := "[]*EnumValue{" + for _, f := range this.Enumvalue { + repeatedStringForEnumvalue += strings.Replace(f.String(), "EnumValue", "EnumValue", 1) + "," + } + repeatedStringForEnumvalue += "}" + repeatedStringForOptions := "[]*Option{" + for _, f := range this.Options { + repeatedStringForOptions += strings.Replace(f.String(), "Option", "Option", 1) + "," + } + repeatedStringForOptions += "}" + s := strings.Join([]string{`&Enum{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Enumvalue:` + repeatedStringForEnumvalue + `,`, + `Options:` + repeatedStringForOptions + `,`, + `SourceContext:` + strings.Replace(fmt.Sprintf("%v", this.SourceContext), "SourceContext", "SourceContext", 1) + `,`, + `Syntax:` + fmt.Sprintf("%v", this.Syntax) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *EnumValue) String() string { + if this == nil { + return "nil" + } + repeatedStringForOptions := "[]*Option{" + for _, f := range this.Options { + repeatedStringForOptions += strings.Replace(f.String(), "Option", "Option", 1) + "," + } + repeatedStringForOptions += "}" + s := strings.Join([]string{`&EnumValue{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Number:` + fmt.Sprintf("%v", this.Number) + `,`, + `Options:` + repeatedStringForOptions + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *Option) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Option{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Value:` + strings.Replace(fmt.Sprintf("%v", this.Value), "Any", "Any", 1) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func valueToStringType(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Type) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Type: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Type: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift 
:= uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Fields", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Fields = append(m.Fields, &Field{}) + if err := m.Fields[len(m.Fields)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Oneofs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Oneofs = append(m.Oneofs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Options = append(m.Options, &Option{}) + if err := m.Options[len(m.Options)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceContext", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SourceContext == nil { + m.SourceContext = &SourceContext{} + } + if err := m.SourceContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Syntax", wireType) + } + m.Syntax = 0 + for shift := uint(0); 
; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Syntax |= Syntax(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipType(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthType + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthType + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Field) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Field: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Field: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + m.Kind = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Kind |= Field_Kind(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Cardinality", wireType) + } + m.Cardinality = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Cardinality |= Field_Cardinality(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Number", wireType) + } + m.Number = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Number |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TypeUrl", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + 
return ErrInvalidLengthType + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TypeUrl = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OneofIndex", wireType) + } + m.OneofIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.OneofIndex |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Packed", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Packed = bool(v != 0) + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Options = append(m.Options, &Option{}) + if err := m.Options[len(m.Options)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field JsonName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.JsonName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultValue", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DefaultValue = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipType(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthType + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthType + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Enum) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Enum: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Enum: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Enumvalue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Enumvalue = append(m.Enumvalue, &EnumValue{}) + if err := m.Enumvalue[len(m.Enumvalue)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Options = append(m.Options, &Option{}) + if err := m.Options[len(m.Options)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceContext", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SourceContext == nil { + m.SourceContext = &SourceContext{} + } + if err := 
m.SourceContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Syntax", wireType) + } + m.Syntax = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Syntax |= Syntax(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipType(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthType + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthType + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EnumValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EnumValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EnumValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Number", wireType) + } + m.Number = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Number |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Options = append(m.Options, &Option{}) + if err := m.Options[len(m.Options)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipType(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthType + } + if (iNdEx + skippy) < 0 { + return 
ErrInvalidLengthType + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Option) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Option: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Option: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Value == nil { + m.Value = &Any{} + } + if err := m.Value.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipType(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthType + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthType + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipType(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowType + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowType + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowType + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthType + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupType + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthType + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthType = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowType = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupType = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/gogo/protobuf/types/wrappers.pb.go b/vendor/github.com/gogo/protobuf/types/wrappers.pb.go new file mode 100644 index 000000000..8f1edb57d --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/wrappers.pb.go @@ -0,0 +1,2730 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: google/protobuf/wrappers.proto + +package types + +import ( + bytes "bytes" + encoding_binary "encoding/binary" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Wrapper message for `double`. +// +// The JSON representation for `DoubleValue` is JSON number. +type DoubleValue struct { + // The double value. 
+ Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DoubleValue) Reset() { *m = DoubleValue{} } +func (*DoubleValue) ProtoMessage() {} +func (*DoubleValue) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{0} +} +func (*DoubleValue) XXX_WellKnownType() string { return "DoubleValue" } +func (m *DoubleValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DoubleValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DoubleValue.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DoubleValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_DoubleValue.Merge(m, src) +} +func (m *DoubleValue) XXX_Size() int { + return m.Size() +} +func (m *DoubleValue) XXX_DiscardUnknown() { + xxx_messageInfo_DoubleValue.DiscardUnknown(m) +} + +var xxx_messageInfo_DoubleValue proto.InternalMessageInfo + +func (m *DoubleValue) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +func (*DoubleValue) XXX_MessageName() string { + return "google.protobuf.DoubleValue" +} + +// Wrapper message for `float`. +// +// The JSON representation for `FloatValue` is JSON number. +type FloatValue struct { + // The float value. + Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FloatValue) Reset() { *m = FloatValue{} } +func (*FloatValue) ProtoMessage() {} +func (*FloatValue) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{1} +} +func (*FloatValue) XXX_WellKnownType() string { return "FloatValue" } +func (m *FloatValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FloatValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_FloatValue.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *FloatValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_FloatValue.Merge(m, src) +} +func (m *FloatValue) XXX_Size() int { + return m.Size() +} +func (m *FloatValue) XXX_DiscardUnknown() { + xxx_messageInfo_FloatValue.DiscardUnknown(m) +} + +var xxx_messageInfo_FloatValue proto.InternalMessageInfo + +func (m *FloatValue) GetValue() float32 { + if m != nil { + return m.Value + } + return 0 +} + +func (*FloatValue) XXX_MessageName() string { + return "google.protobuf.FloatValue" +} + +// Wrapper message for `int64`. +// +// The JSON representation for `Int64Value` is JSON string. +type Int64Value struct { + // The int64 value. 
+ Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Int64Value) Reset() { *m = Int64Value{} } +func (*Int64Value) ProtoMessage() {} +func (*Int64Value) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{2} +} +func (*Int64Value) XXX_WellKnownType() string { return "Int64Value" } +func (m *Int64Value) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Int64Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Int64Value.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Int64Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_Int64Value.Merge(m, src) +} +func (m *Int64Value) XXX_Size() int { + return m.Size() +} +func (m *Int64Value) XXX_DiscardUnknown() { + xxx_messageInfo_Int64Value.DiscardUnknown(m) +} + +var xxx_messageInfo_Int64Value proto.InternalMessageInfo + +func (m *Int64Value) GetValue() int64 { + if m != nil { + return m.Value + } + return 0 +} + +func (*Int64Value) XXX_MessageName() string { + return "google.protobuf.Int64Value" +} + +// Wrapper message for `uint64`. +// +// The JSON representation for `UInt64Value` is JSON string. +type UInt64Value struct { + // The uint64 value. + Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UInt64Value) Reset() { *m = UInt64Value{} } +func (*UInt64Value) ProtoMessage() {} +func (*UInt64Value) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{3} +} +func (*UInt64Value) XXX_WellKnownType() string { return "UInt64Value" } +func (m *UInt64Value) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UInt64Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_UInt64Value.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *UInt64Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_UInt64Value.Merge(m, src) +} +func (m *UInt64Value) XXX_Size() int { + return m.Size() +} +func (m *UInt64Value) XXX_DiscardUnknown() { + xxx_messageInfo_UInt64Value.DiscardUnknown(m) +} + +var xxx_messageInfo_UInt64Value proto.InternalMessageInfo + +func (m *UInt64Value) GetValue() uint64 { + if m != nil { + return m.Value + } + return 0 +} + +func (*UInt64Value) XXX_MessageName() string { + return "google.protobuf.UInt64Value" +} + +// Wrapper message for `int32`. +// +// The JSON representation for `Int32Value` is JSON number. +type Int32Value struct { + // The int32 value. 
+ Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Int32Value) Reset() { *m = Int32Value{} } +func (*Int32Value) ProtoMessage() {} +func (*Int32Value) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{4} +} +func (*Int32Value) XXX_WellKnownType() string { return "Int32Value" } +func (m *Int32Value) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Int32Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Int32Value.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Int32Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_Int32Value.Merge(m, src) +} +func (m *Int32Value) XXX_Size() int { + return m.Size() +} +func (m *Int32Value) XXX_DiscardUnknown() { + xxx_messageInfo_Int32Value.DiscardUnknown(m) +} + +var xxx_messageInfo_Int32Value proto.InternalMessageInfo + +func (m *Int32Value) GetValue() int32 { + if m != nil { + return m.Value + } + return 0 +} + +func (*Int32Value) XXX_MessageName() string { + return "google.protobuf.Int32Value" +} + +// Wrapper message for `uint32`. +// +// The JSON representation for `UInt32Value` is JSON number. +type UInt32Value struct { + // The uint32 value. + Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UInt32Value) Reset() { *m = UInt32Value{} } +func (*UInt32Value) ProtoMessage() {} +func (*UInt32Value) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{5} +} +func (*UInt32Value) XXX_WellKnownType() string { return "UInt32Value" } +func (m *UInt32Value) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UInt32Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_UInt32Value.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *UInt32Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_UInt32Value.Merge(m, src) +} +func (m *UInt32Value) XXX_Size() int { + return m.Size() +} +func (m *UInt32Value) XXX_DiscardUnknown() { + xxx_messageInfo_UInt32Value.DiscardUnknown(m) +} + +var xxx_messageInfo_UInt32Value proto.InternalMessageInfo + +func (m *UInt32Value) GetValue() uint32 { + if m != nil { + return m.Value + } + return 0 +} + +func (*UInt32Value) XXX_MessageName() string { + return "google.protobuf.UInt32Value" +} + +// Wrapper message for `bool`. +// +// The JSON representation for `BoolValue` is JSON `true` and `false`. +type BoolValue struct { + // The bool value. 
+ Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BoolValue) Reset() { *m = BoolValue{} } +func (*BoolValue) ProtoMessage() {} +func (*BoolValue) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{6} +} +func (*BoolValue) XXX_WellKnownType() string { return "BoolValue" } +func (m *BoolValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BoolValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BoolValue.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BoolValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_BoolValue.Merge(m, src) +} +func (m *BoolValue) XXX_Size() int { + return m.Size() +} +func (m *BoolValue) XXX_DiscardUnknown() { + xxx_messageInfo_BoolValue.DiscardUnknown(m) +} + +var xxx_messageInfo_BoolValue proto.InternalMessageInfo + +func (m *BoolValue) GetValue() bool { + if m != nil { + return m.Value + } + return false +} + +func (*BoolValue) XXX_MessageName() string { + return "google.protobuf.BoolValue" +} + +// Wrapper message for `string`. +// +// The JSON representation for `StringValue` is JSON string. +type StringValue struct { + // The string value. + Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StringValue) Reset() { *m = StringValue{} } +func (*StringValue) ProtoMessage() {} +func (*StringValue) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{7} +} +func (*StringValue) XXX_WellKnownType() string { return "StringValue" } +func (m *StringValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StringValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StringValue.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *StringValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_StringValue.Merge(m, src) +} +func (m *StringValue) XXX_Size() int { + return m.Size() +} +func (m *StringValue) XXX_DiscardUnknown() { + xxx_messageInfo_StringValue.DiscardUnknown(m) +} + +var xxx_messageInfo_StringValue proto.InternalMessageInfo + +func (m *StringValue) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +func (*StringValue) XXX_MessageName() string { + return "google.protobuf.StringValue" +} + +// Wrapper message for `bytes`. +// +// The JSON representation for `BytesValue` is JSON string. +type BytesValue struct { + // The bytes value. 
+ Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BytesValue) Reset() { *m = BytesValue{} } +func (*BytesValue) ProtoMessage() {} +func (*BytesValue) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{8} +} +func (*BytesValue) XXX_WellKnownType() string { return "BytesValue" } +func (m *BytesValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BytesValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BytesValue.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BytesValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_BytesValue.Merge(m, src) +} +func (m *BytesValue) XXX_Size() int { + return m.Size() +} +func (m *BytesValue) XXX_DiscardUnknown() { + xxx_messageInfo_BytesValue.DiscardUnknown(m) +} + +var xxx_messageInfo_BytesValue proto.InternalMessageInfo + +func (m *BytesValue) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (*BytesValue) XXX_MessageName() string { + return "google.protobuf.BytesValue" +} +func init() { + proto.RegisterType((*DoubleValue)(nil), "google.protobuf.DoubleValue") + proto.RegisterType((*FloatValue)(nil), "google.protobuf.FloatValue") + proto.RegisterType((*Int64Value)(nil), "google.protobuf.Int64Value") + proto.RegisterType((*UInt64Value)(nil), "google.protobuf.UInt64Value") + proto.RegisterType((*Int32Value)(nil), "google.protobuf.Int32Value") + proto.RegisterType((*UInt32Value)(nil), "google.protobuf.UInt32Value") + proto.RegisterType((*BoolValue)(nil), "google.protobuf.BoolValue") + proto.RegisterType((*StringValue)(nil), "google.protobuf.StringValue") + proto.RegisterType((*BytesValue)(nil), "google.protobuf.BytesValue") +} + +func init() { proto.RegisterFile("google/protobuf/wrappers.proto", fileDescriptor_5377b62bda767935) } + +var fileDescriptor_5377b62bda767935 = []byte{ + // 285 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x2f, 0x4a, 0x2c, + 0x28, 0x48, 0x2d, 0x2a, 0xd6, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0xca, + 0x5c, 0xdc, 0x2e, 0xf9, 0xa5, 0x49, 0x39, 0xa9, 0x61, 0x89, 0x39, 0xa5, 0xa9, 0x42, 0x22, 0x5c, + 0xac, 0x65, 0x20, 0x86, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x63, 0x10, 0x84, 0xa3, 0xa4, 0xc4, 0xc5, + 0xe5, 0x96, 0x93, 0x9f, 0x58, 0x82, 0x45, 0x0d, 0x13, 0x92, 0x1a, 0xcf, 0xbc, 0x12, 0x33, 0x13, + 0x2c, 0x6a, 0x98, 0x61, 0x6a, 0x94, 0xb9, 0xb8, 0x43, 0x71, 0x29, 0x62, 0x41, 0x35, 0xc8, 0xd8, + 0x08, 0x8b, 0x1a, 0x56, 0x34, 0x83, 0xb0, 0x2a, 0xe2, 0x85, 0x29, 0x52, 0xe4, 0xe2, 0x74, 0xca, + 0xcf, 0xcf, 0xc1, 0xa2, 0x84, 0x03, 0xc9, 0x9c, 0xe0, 0x92, 0xa2, 0xcc, 0xbc, 0x74, 0x2c, 0x8a, + 0x38, 0x91, 0x1c, 0xe4, 0x54, 0x59, 0x92, 0x5a, 0x8c, 0x45, 0x0d, 0x0f, 0x54, 0x8d, 0x53, 0x3b, + 0xe3, 0x8d, 0x87, 0x72, 0x0c, 0x1f, 0x1e, 0xca, 0x31, 0xfe, 0x78, 0x28, 0xc7, 0xd8, 0xf0, 0x48, + 0x8e, 0x71, 0xc5, 0x23, 0x39, 0xc6, 0x13, 0x8f, 0xe4, 0x18, 0x2f, 0x3c, 0x92, 0x63, 0x7c, 0xf0, + 0x48, 0x8e, 0xf1, 0xc5, 0x23, 0x39, 0x86, 0x0f, 0x20, 0xf1, 0xc7, 0x72, 0x8c, 0x27, 0x1e, 0xcb, + 0x31, 0x72, 0x09, 0x27, 0xe7, 0xe7, 0xea, 0xa1, 0x45, 0x87, 0x13, 
0x6f, 0x38, 0x34, 0xbe, 0x02, + 0x40, 0x22, 0x01, 0x8c, 0x51, 0xac, 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x3f, 0x18, 0x19, 0x17, 0x31, + 0x31, 0xbb, 0x07, 0x38, 0xad, 0x62, 0x92, 0x73, 0x87, 0x68, 0x09, 0x80, 0x6a, 0xd1, 0x0b, 0x4f, + 0xcd, 0xc9, 0xf1, 0xce, 0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0xa9, 0x4c, 0x62, 0x03, 0x9b, 0x65, 0x0c, + 0x08, 0x00, 0x00, 0xff, 0xff, 0x31, 0x55, 0x64, 0x90, 0x0a, 0x02, 0x00, 0x00, +} + +func (this *DoubleValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*DoubleValue) + if !ok { + that2, ok := that.(DoubleValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Value != that1.Value { + if this.Value < that1.Value { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *FloatValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*FloatValue) + if !ok { + that2, ok := that.(FloatValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Value != that1.Value { + if this.Value < that1.Value { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Int64Value) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Int64Value) + if !ok { + that2, ok := that.(Int64Value) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Value != that1.Value { + if this.Value < that1.Value { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *UInt64Value) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*UInt64Value) + if !ok { + that2, ok := that.(UInt64Value) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Value != that1.Value { + if this.Value < that1.Value { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Int32Value) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Int32Value) + if !ok { + that2, ok := that.(Int32Value) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Value != that1.Value { + if this.Value < that1.Value { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *UInt32Value) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*UInt32Value) + if !ok { + that2, ok := that.(UInt32Value) + if ok { + that1 = &that2 + } else { + return 1 + } + } 
+ if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Value != that1.Value { + if this.Value < that1.Value { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *BoolValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*BoolValue) + if !ok { + that2, ok := that.(BoolValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Value != that1.Value { + if !this.Value { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *StringValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*StringValue) + if !ok { + that2, ok := that.(StringValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Value != that1.Value { + if this.Value < that1.Value { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *BytesValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*BytesValue) + if !ok { + that2, ok := that.(BytesValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if c := bytes.Compare(this.Value, that1.Value); c != 0 { + return c + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *DoubleValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*DoubleValue) + if !ok { + that2, ok := that.(DoubleValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *FloatValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*FloatValue) + if !ok { + that2, ok := that.(FloatValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Int64Value) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Int64Value) + if !ok { + that2, ok := that.(Int64Value) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *UInt64Value) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + 
+ that1, ok := that.(*UInt64Value) + if !ok { + that2, ok := that.(UInt64Value) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Int32Value) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Int32Value) + if !ok { + that2, ok := that.(Int32Value) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *UInt32Value) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*UInt32Value) + if !ok { + that2, ok := that.(UInt32Value) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *BoolValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*BoolValue) + if !ok { + that2, ok := that.(BoolValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *StringValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*StringValue) + if !ok { + that2, ok := that.(StringValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *BytesValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*BytesValue) + if !ok { + that2, ok := that.(BytesValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !bytes.Equal(this.Value, that1.Value) { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *DoubleValue) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.DoubleValue{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FloatValue) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.FloatValue{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, 
"}") + return strings.Join(s, "") +} +func (this *Int64Value) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.Int64Value{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *UInt64Value) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.UInt64Value{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Int32Value) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.Int32Value{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *UInt32Value) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.UInt32Value{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *BoolValue) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.BoolValue{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *StringValue) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.StringValue{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *BytesValue) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.BytesValue{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringWrappers(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *DoubleValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DoubleValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DoubleValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= 
len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Value != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func (m *FloatValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FloatValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FloatValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Value != 0 { + i -= 4 + encoding_binary.LittleEndian.PutUint32(dAtA[i:], uint32(math.Float32bits(float32(m.Value)))) + i-- + dAtA[i] = 0xd + } + return len(dAtA) - i, nil +} + +func (m *Int64Value) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Int64Value) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Int64Value) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Value != 0 { + i = encodeVarintWrappers(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *UInt64Value) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UInt64Value) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UInt64Value) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Value != 0 { + i = encodeVarintWrappers(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Int32Value) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Int32Value) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Int32Value) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Value != 0 { + i = encodeVarintWrappers(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *UInt32Value) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UInt32Value) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UInt32Value) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := 
len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Value != 0 { + i = encodeVarintWrappers(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *BoolValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BoolValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BoolValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Value { + i-- + if m.Value { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *StringValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StringValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StringValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintWrappers(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *BytesValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BytesValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BytesValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintWrappers(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintWrappers(dAtA []byte, offset int, v uint64) int { + offset -= sovWrappers(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func NewPopulatedDoubleValue(r randyWrappers, easy bool) *DoubleValue { + this := &DoubleValue{} + this.Value = float64(r.Float64()) + if r.Intn(2) == 0 { + this.Value *= -1 + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedWrappers(r, 2) + } + return this +} + +func NewPopulatedFloatValue(r randyWrappers, easy bool) *FloatValue { + this := &FloatValue{} + this.Value = float32(r.Float32()) + if r.Intn(2) == 0 { + this.Value *= -1 + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedWrappers(r, 2) + } + return this +} + +func NewPopulatedInt64Value(r randyWrappers, easy bool) *Int64Value { + this := &Int64Value{} + this.Value = int64(r.Int63()) + if r.Intn(2) == 0 { + this.Value *= -1 + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = 
randUnrecognizedWrappers(r, 2) + } + return this +} + +func NewPopulatedUInt64Value(r randyWrappers, easy bool) *UInt64Value { + this := &UInt64Value{} + this.Value = uint64(uint64(r.Uint32())) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedWrappers(r, 2) + } + return this +} + +func NewPopulatedInt32Value(r randyWrappers, easy bool) *Int32Value { + this := &Int32Value{} + this.Value = int32(r.Int31()) + if r.Intn(2) == 0 { + this.Value *= -1 + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedWrappers(r, 2) + } + return this +} + +func NewPopulatedUInt32Value(r randyWrappers, easy bool) *UInt32Value { + this := &UInt32Value{} + this.Value = uint32(r.Uint32()) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedWrappers(r, 2) + } + return this +} + +func NewPopulatedBoolValue(r randyWrappers, easy bool) *BoolValue { + this := &BoolValue{} + this.Value = bool(bool(r.Intn(2) == 0)) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedWrappers(r, 2) + } + return this +} + +func NewPopulatedStringValue(r randyWrappers, easy bool) *StringValue { + this := &StringValue{} + this.Value = string(randStringWrappers(r)) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedWrappers(r, 2) + } + return this +} + +func NewPopulatedBytesValue(r randyWrappers, easy bool) *BytesValue { + this := &BytesValue{} + v1 := r.Intn(100) + this.Value = make([]byte, v1) + for i := 0; i < v1; i++ { + this.Value[i] = byte(r.Intn(256)) + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedWrappers(r, 2) + } + return this +} + +type randyWrappers interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneWrappers(r randyWrappers) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringWrappers(r randyWrappers) string { + v2 := r.Intn(100) + tmps := make([]rune, v2) + for i := 0; i < v2; i++ { + tmps[i] = randUTF8RuneWrappers(r) + } + return string(tmps) +} +func randUnrecognizedWrappers(r randyWrappers, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldWrappers(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldWrappers(dAtA []byte, r randyWrappers, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = encodeVarintPopulateWrappers(dAtA, uint64(key)) + v3 := r.Int63() + if r.Intn(2) == 0 { + v3 *= -1 + } + dAtA = encodeVarintPopulateWrappers(dAtA, uint64(v3)) + case 1: + dAtA = encodeVarintPopulateWrappers(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateWrappers(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateWrappers(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateWrappers(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateWrappers(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = 
append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *DoubleValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 9 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *FloatValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 5 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Int64Value) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 1 + sovWrappers(uint64(m.Value)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *UInt64Value) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 1 + sovWrappers(uint64(m.Value)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Int32Value) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 1 + sovWrappers(uint64(m.Value)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *UInt32Value) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 1 + sovWrappers(uint64(m.Value)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *BoolValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *StringValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Value) + if l > 0 { + n += 1 + l + sovWrappers(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *BytesValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Value) + if l > 0 { + n += 1 + l + sovWrappers(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovWrappers(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozWrappers(x uint64) (n int) { + return sovWrappers(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *DoubleValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DoubleValue{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *FloatValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FloatValue{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *Int64Value) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Int64Value{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *UInt64Value) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UInt64Value{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *Int32Value) String() string { + if this == nil { + return "nil" + 
} + s := strings.Join([]string{`&Int32Value{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *UInt32Value) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UInt32Value{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *BoolValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BoolValue{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *StringValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StringValue{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *BytesValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BytesValue{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func valueToStringWrappers(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *DoubleValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DoubleValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DoubleValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = float64(math.Float64frombits(v)) + default: + iNdEx = preIndex + skippy, err := skipWrappers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FloatValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FloatValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FloatValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 5 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint32 + if (iNdEx + 4) > l { + return io.ErrUnexpectedEOF + } + v = uint32(encoding_binary.LittleEndian.Uint32(dAtA[iNdEx:])) + iNdEx += 4 + m.Value = float32(math.Float32frombits(v)) + default: + iNdEx = preIndex + skippy, err := skipWrappers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Int64Value) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Int64Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Int64Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipWrappers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UInt64Value) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UInt64Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UInt64Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipWrappers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Int32Value) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Int32Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Int32Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipWrappers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UInt32Value) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UInt32Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UInt32Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipWrappers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BoolValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BoolValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BoolValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Value = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipWrappers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StringValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StringValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StringValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWrappers + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthWrappers + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWrappers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BytesValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BytesValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BytesValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthWrappers + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthWrappers + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWrappers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipWrappers(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWrappers + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWrappers + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWrappers + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthWrappers + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupWrappers + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthWrappers + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthWrappers = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowWrappers = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupWrappers = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/gogo/protobuf/types/wrappers_gogo.go b/vendor/github.com/gogo/protobuf/types/wrappers_gogo.go new file mode 100644 index 000000000..d905df360 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/wrappers_gogo.go @@ -0,0 +1,300 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package types + +func NewPopulatedStdDouble(r randyWrappers, easy bool) *float64 { + v := NewPopulatedDoubleValue(r, easy) + return &v.Value +} + +func SizeOfStdDouble(v float64) int { + pv := &DoubleValue{Value: v} + return pv.Size() +} + +func StdDoubleMarshal(v float64) ([]byte, error) { + size := SizeOfStdDouble(v) + buf := make([]byte, size) + _, err := StdDoubleMarshalTo(v, buf) + return buf, err +} + +func StdDoubleMarshalTo(v float64, data []byte) (int, error) { + pv := &DoubleValue{Value: v} + return pv.MarshalTo(data) +} + +func StdDoubleUnmarshal(v *float64, data []byte) error { + pv := &DoubleValue{} + if err := pv.Unmarshal(data); err != nil { + return err + } + *v = pv.Value + return nil +} +func NewPopulatedStdFloat(r randyWrappers, easy bool) *float32 { + v := NewPopulatedFloatValue(r, easy) + return &v.Value +} + +func SizeOfStdFloat(v float32) int { + pv := &FloatValue{Value: v} + return pv.Size() +} + +func StdFloatMarshal(v float32) ([]byte, error) { + size := SizeOfStdFloat(v) + buf := make([]byte, size) + _, err := StdFloatMarshalTo(v, buf) + return buf, err +} + +func StdFloatMarshalTo(v float32, data []byte) (int, error) { + pv := &FloatValue{Value: v} + return pv.MarshalTo(data) +} + +func StdFloatUnmarshal(v *float32, data []byte) error { + pv := &FloatValue{} + if err := pv.Unmarshal(data); err != nil { + return err + } + *v = pv.Value + return nil +} +func NewPopulatedStdInt64(r randyWrappers, easy bool) *int64 { + v := NewPopulatedInt64Value(r, easy) + return &v.Value +} + +func SizeOfStdInt64(v int64) int { + pv := &Int64Value{Value: v} + return pv.Size() +} + +func StdInt64Marshal(v int64) ([]byte, error) { + size := SizeOfStdInt64(v) + buf := make([]byte, size) + _, err := StdInt64MarshalTo(v, buf) + return buf, err +} + +func StdInt64MarshalTo(v int64, data []byte) (int, error) { + pv := &Int64Value{Value: v} + return pv.MarshalTo(data) +} + +func StdInt64Unmarshal(v *int64, data []byte) error { + pv := &Int64Value{} + if err := pv.Unmarshal(data); err != nil { + return err + } + *v = pv.Value + return nil +} +func NewPopulatedStdUInt64(r randyWrappers, easy bool) *uint64 { + v := NewPopulatedUInt64Value(r, easy) + return &v.Value +} + +func SizeOfStdUInt64(v uint64) int { + pv := &UInt64Value{Value: v} + return pv.Size() +} + +func StdUInt64Marshal(v uint64) ([]byte, error) { + size := SizeOfStdUInt64(v) + buf := make([]byte, size) + _, err := StdUInt64MarshalTo(v, buf) + return buf, err +} + +func StdUInt64MarshalTo(v uint64, data []byte) (int, error) { + pv := &UInt64Value{Value: v} + return pv.MarshalTo(data) +} + +func StdUInt64Unmarshal(v *uint64, data []byte) error { + pv := &UInt64Value{} + if err := pv.Unmarshal(data); err != nil { + return err + } + *v = pv.Value + return nil +} +func NewPopulatedStdInt32(r randyWrappers, easy bool) *int32 { + v := NewPopulatedInt32Value(r, easy) + return &v.Value +} + +func SizeOfStdInt32(v int32) int { + pv := &Int32Value{Value: v} + return pv.Size() +} + +func 
StdInt32Marshal(v int32) ([]byte, error) { + size := SizeOfStdInt32(v) + buf := make([]byte, size) + _, err := StdInt32MarshalTo(v, buf) + return buf, err +} + +func StdInt32MarshalTo(v int32, data []byte) (int, error) { + pv := &Int32Value{Value: v} + return pv.MarshalTo(data) +} + +func StdInt32Unmarshal(v *int32, data []byte) error { + pv := &Int32Value{} + if err := pv.Unmarshal(data); err != nil { + return err + } + *v = pv.Value + return nil +} +func NewPopulatedStdUInt32(r randyWrappers, easy bool) *uint32 { + v := NewPopulatedUInt32Value(r, easy) + return &v.Value +} + +func SizeOfStdUInt32(v uint32) int { + pv := &UInt32Value{Value: v} + return pv.Size() +} + +func StdUInt32Marshal(v uint32) ([]byte, error) { + size := SizeOfStdUInt32(v) + buf := make([]byte, size) + _, err := StdUInt32MarshalTo(v, buf) + return buf, err +} + +func StdUInt32MarshalTo(v uint32, data []byte) (int, error) { + pv := &UInt32Value{Value: v} + return pv.MarshalTo(data) +} + +func StdUInt32Unmarshal(v *uint32, data []byte) error { + pv := &UInt32Value{} + if err := pv.Unmarshal(data); err != nil { + return err + } + *v = pv.Value + return nil +} +func NewPopulatedStdBool(r randyWrappers, easy bool) *bool { + v := NewPopulatedBoolValue(r, easy) + return &v.Value +} + +func SizeOfStdBool(v bool) int { + pv := &BoolValue{Value: v} + return pv.Size() +} + +func StdBoolMarshal(v bool) ([]byte, error) { + size := SizeOfStdBool(v) + buf := make([]byte, size) + _, err := StdBoolMarshalTo(v, buf) + return buf, err +} + +func StdBoolMarshalTo(v bool, data []byte) (int, error) { + pv := &BoolValue{Value: v} + return pv.MarshalTo(data) +} + +func StdBoolUnmarshal(v *bool, data []byte) error { + pv := &BoolValue{} + if err := pv.Unmarshal(data); err != nil { + return err + } + *v = pv.Value + return nil +} +func NewPopulatedStdString(r randyWrappers, easy bool) *string { + v := NewPopulatedStringValue(r, easy) + return &v.Value +} + +func SizeOfStdString(v string) int { + pv := &StringValue{Value: v} + return pv.Size() +} + +func StdStringMarshal(v string) ([]byte, error) { + size := SizeOfStdString(v) + buf := make([]byte, size) + _, err := StdStringMarshalTo(v, buf) + return buf, err +} + +func StdStringMarshalTo(v string, data []byte) (int, error) { + pv := &StringValue{Value: v} + return pv.MarshalTo(data) +} + +func StdStringUnmarshal(v *string, data []byte) error { + pv := &StringValue{} + if err := pv.Unmarshal(data); err != nil { + return err + } + *v = pv.Value + return nil +} +func NewPopulatedStdBytes(r randyWrappers, easy bool) *[]byte { + v := NewPopulatedBytesValue(r, easy) + return &v.Value +} + +func SizeOfStdBytes(v []byte) int { + pv := &BytesValue{Value: v} + return pv.Size() +} + +func StdBytesMarshal(v []byte) ([]byte, error) { + size := SizeOfStdBytes(v) + buf := make([]byte, size) + _, err := StdBytesMarshalTo(v, buf) + return buf, err +} + +func StdBytesMarshalTo(v []byte, data []byte) (int, error) { + pv := &BytesValue{Value: v} + return pv.MarshalTo(data) +} + +func StdBytesUnmarshal(v *[]byte, data []byte) error { + pv := &BytesValue{} + if err := pv.Unmarshal(data); err != nil { + return err + } + *v = pv.Value + return nil +} diff --git a/vendor/github.com/golang/protobuf/proto/buffer.go b/vendor/github.com/golang/protobuf/proto/buffer.go new file mode 100644 index 000000000..e810e6fea --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/buffer.go @@ -0,0 +1,324 @@ +// Copyright 2019 The Go Authors. All rights reserved. 
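For orientation, a minimal round-trip sketch of the Std* wrapper helpers added in wrappers_gogo.go above; the value 42 is arbitrary and the import path follows the vendored package github.com/gogo/protobuf/types.

package main

import (
	"fmt"

	"github.com/gogo/protobuf/types"
)

func main() {
	// Marshal a plain int64 as a google.protobuf.Int64Value wrapper message.
	data, err := types.StdInt64Marshal(42)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(data), types.SizeOfStdInt64(42)) // the two lengths agree

	// Unmarshal the wrapper bytes back into a plain int64.
	var v int64
	if err := types.StdInt64Unmarshal(&v, data); err != nil {
		panic(err)
	}
	fmt.Println(v) // 42
}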
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "errors" + "fmt" + + "google.golang.org/protobuf/encoding/prototext" + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + WireVarint = 0 + WireFixed32 = 5 + WireFixed64 = 1 + WireBytes = 2 + WireStartGroup = 3 + WireEndGroup = 4 +) + +// EncodeVarint returns the varint encoded bytes of v. +func EncodeVarint(v uint64) []byte { + return protowire.AppendVarint(nil, v) +} + +// SizeVarint returns the length of the varint encoded bytes of v. +// This is equal to len(EncodeVarint(v)). +func SizeVarint(v uint64) int { + return protowire.SizeVarint(v) +} + +// DecodeVarint parses a varint encoded integer from b, +// returning the integer value and the length of the varint. +// It returns (0, 0) if there is a parse error. +func DecodeVarint(b []byte) (uint64, int) { + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return 0, 0 + } + return v, n +} + +// Buffer is a buffer for encoding and decoding the protobuf wire format. +// It may be reused between invocations to reduce memory usage. +type Buffer struct { + buf []byte + idx int + deterministic bool +} + +// NewBuffer allocates a new Buffer initialized with buf, +// where the contents of buf are considered the unread portion of the buffer. +func NewBuffer(buf []byte) *Buffer { + return &Buffer{buf: buf} +} + +// SetDeterministic specifies whether to use deterministic serialization. +// +// Deterministic serialization guarantees that for a given binary, equal +// messages will always be serialized to the same bytes. This implies: +// +// - Repeated serialization of a message will return the same bytes. +// - Different processes of the same binary (which may be executing on +// different machines) will serialize equal messages to the same bytes. +// +// Note that the deterministic serialization is NOT canonical across +// languages. It is not guaranteed to remain stable over time. It is unstable +// across different builds with schema changes due to unknown fields. +// Users who need canonical serialization (e.g., persistent storage in a +// canonical form, fingerprinting, etc.) should define their own +// canonicalization specification and implement their own serializer rather +// than relying on this API. +// +// If deterministic serialization is requested, map entries will be sorted +// by keys in lexographical order. This is an implementation detail and +// subject to change. +func (b *Buffer) SetDeterministic(deterministic bool) { + b.deterministic = deterministic +} + +// SetBuf sets buf as the internal buffer, +// where the contents of buf are considered the unread portion of the buffer. +func (b *Buffer) SetBuf(buf []byte) { + b.buf = buf + b.idx = 0 +} + +// Reset clears the internal buffer of all written and unread data. +func (b *Buffer) Reset() { + b.buf = b.buf[:0] + b.idx = 0 +} + +// Bytes returns the internal buffer. +func (b *Buffer) Bytes() []byte { + return b.buf +} + +// Unread returns the unread portion of the buffer. +func (b *Buffer) Unread() []byte { + return b.buf[b.idx:] +} + +// Marshal appends the wire-format encoding of m to the buffer. +func (b *Buffer) Marshal(m Message) error { + var err error + b.buf, err = marshalAppend(b.buf, m, b.deterministic) + return err +} + +// Unmarshal parses the wire-format message in the buffer and +// places the decoded results in m. +// It does not reset m before unmarshaling. 
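As a quick illustration of the package-level varint helpers defined in the new buffer.go above (a sketch only; the value 300 is arbitrary):

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	// 300 needs two varint bytes (0xAC 0x02).
	b := proto.EncodeVarint(300)
	fmt.Println(len(b), proto.SizeVarint(300)) // 2 2

	// DecodeVarint reports the value and how many bytes it consumed.
	v, n := proto.DecodeVarint(b)
	fmt.Println(v, n) // 300 2
}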
+func (b *Buffer) Unmarshal(m Message) error { + err := UnmarshalMerge(b.Unread(), m) + b.idx = len(b.buf) + return err +} + +type unknownFields struct{ XXX_unrecognized protoimpl.UnknownFields } + +func (m *unknownFields) String() string { panic("not implemented") } +func (m *unknownFields) Reset() { panic("not implemented") } +func (m *unknownFields) ProtoMessage() { panic("not implemented") } + +// DebugPrint dumps the encoded bytes of b with a header and footer including s +// to stdout. This is only intended for debugging. +func (*Buffer) DebugPrint(s string, b []byte) { + m := MessageReflect(new(unknownFields)) + m.SetUnknown(b) + b, _ = prototext.MarshalOptions{AllowPartial: true, Indent: "\t"}.Marshal(m.Interface()) + fmt.Printf("==== %s ====\n%s==== %s ====\n", s, b, s) +} + +// EncodeVarint appends an unsigned varint encoding to the buffer. +func (b *Buffer) EncodeVarint(v uint64) error { + b.buf = protowire.AppendVarint(b.buf, v) + return nil +} + +// EncodeZigzag32 appends a 32-bit zig-zag varint encoding to the buffer. +func (b *Buffer) EncodeZigzag32(v uint64) error { + return b.EncodeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) +} + +// EncodeZigzag64 appends a 64-bit zig-zag varint encoding to the buffer. +func (b *Buffer) EncodeZigzag64(v uint64) error { + return b.EncodeVarint(uint64((uint64(v) << 1) ^ uint64((int64(v) >> 63)))) +} + +// EncodeFixed32 appends a 32-bit little-endian integer to the buffer. +func (b *Buffer) EncodeFixed32(v uint64) error { + b.buf = protowire.AppendFixed32(b.buf, uint32(v)) + return nil +} + +// EncodeFixed64 appends a 64-bit little-endian integer to the buffer. +func (b *Buffer) EncodeFixed64(v uint64) error { + b.buf = protowire.AppendFixed64(b.buf, uint64(v)) + return nil +} + +// EncodeRawBytes appends a length-prefixed raw bytes to the buffer. +func (b *Buffer) EncodeRawBytes(v []byte) error { + b.buf = protowire.AppendBytes(b.buf, v) + return nil +} + +// EncodeStringBytes appends a length-prefixed raw bytes to the buffer. +// It does not validate whether v contains valid UTF-8. +func (b *Buffer) EncodeStringBytes(v string) error { + b.buf = protowire.AppendString(b.buf, v) + return nil +} + +// EncodeMessage appends a length-prefixed encoded message to the buffer. +func (b *Buffer) EncodeMessage(m Message) error { + var err error + b.buf = protowire.AppendVarint(b.buf, uint64(Size(m))) + b.buf, err = marshalAppend(b.buf, m, b.deterministic) + return err +} + +// DecodeVarint consumes an encoded unsigned varint from the buffer. +func (b *Buffer) DecodeVarint() (uint64, error) { + v, n := protowire.ConsumeVarint(b.buf[b.idx:]) + if n < 0 { + return 0, protowire.ParseError(n) + } + b.idx += n + return uint64(v), nil +} + +// DecodeZigzag32 consumes an encoded 32-bit zig-zag varint from the buffer. +func (b *Buffer) DecodeZigzag32() (uint64, error) { + v, err := b.DecodeVarint() + if err != nil { + return 0, err + } + return uint64((uint32(v) >> 1) ^ uint32((int32(v&1)<<31)>>31)), nil +} + +// DecodeZigzag64 consumes an encoded 64-bit zig-zag varint from the buffer. +func (b *Buffer) DecodeZigzag64() (uint64, error) { + v, err := b.DecodeVarint() + if err != nil { + return 0, err + } + return uint64((uint64(v) >> 1) ^ uint64((int64(v&1)<<63)>>63)), nil +} + +// DecodeFixed32 consumes a 32-bit little-endian integer from the buffer. 
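A small sketch of the zig-zag encode/decode pair shown above, using a Buffer as both writer and reader; the value -3 is arbitrary.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	// Zig-zag encoding maps signed values to unsigned ones so that small
	// negative numbers stay small on the wire: 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, ...
	n := int64(-3)

	b := proto.NewBuffer(nil)
	if err := b.EncodeZigzag64(uint64(n)); err != nil { // -3 is stored as varint 5
		panic(err)
	}

	v, err := b.DecodeZigzag64()
	if err != nil {
		panic(err)
	}
	fmt.Println(int64(v)) // -3
}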
+func (b *Buffer) DecodeFixed32() (uint64, error) { + v, n := protowire.ConsumeFixed32(b.buf[b.idx:]) + if n < 0 { + return 0, protowire.ParseError(n) + } + b.idx += n + return uint64(v), nil +} + +// DecodeFixed64 consumes a 64-bit little-endian integer from the buffer. +func (b *Buffer) DecodeFixed64() (uint64, error) { + v, n := protowire.ConsumeFixed64(b.buf[b.idx:]) + if n < 0 { + return 0, protowire.ParseError(n) + } + b.idx += n + return uint64(v), nil +} + +// DecodeRawBytes consumes a length-prefixed raw bytes from the buffer. +// If alloc is specified, it returns a copy the raw bytes +// rather than a sub-slice of the buffer. +func (b *Buffer) DecodeRawBytes(alloc bool) ([]byte, error) { + v, n := protowire.ConsumeBytes(b.buf[b.idx:]) + if n < 0 { + return nil, protowire.ParseError(n) + } + b.idx += n + if alloc { + v = append([]byte(nil), v...) + } + return v, nil +} + +// DecodeStringBytes consumes a length-prefixed raw bytes from the buffer. +// It does not validate whether the raw bytes contain valid UTF-8. +func (b *Buffer) DecodeStringBytes() (string, error) { + v, n := protowire.ConsumeString(b.buf[b.idx:]) + if n < 0 { + return "", protowire.ParseError(n) + } + b.idx += n + return v, nil +} + +// DecodeMessage consumes a length-prefixed message from the buffer. +// It does not reset m before unmarshaling. +func (b *Buffer) DecodeMessage(m Message) error { + v, err := b.DecodeRawBytes(false) + if err != nil { + return err + } + return UnmarshalMerge(v, m) +} + +// DecodeGroup consumes a message group from the buffer. +// It assumes that the start group marker has already been consumed and +// consumes all bytes until (and including the end group marker). +// It does not reset m before unmarshaling. +func (b *Buffer) DecodeGroup(m Message) error { + v, n, err := consumeGroup(b.buf[b.idx:]) + if err != nil { + return err + } + b.idx += n + return UnmarshalMerge(v, m) +} + +// consumeGroup parses b until it finds an end group marker, returning +// the raw bytes of the message (excluding the end group marker) and the +// the total length of the message (including the end group marker). +func consumeGroup(b []byte) ([]byte, int, error) { + b0 := b + depth := 1 // assume this follows a start group marker + for { + _, wtyp, tagLen := protowire.ConsumeTag(b) + if tagLen < 0 { + return nil, 0, protowire.ParseError(tagLen) + } + b = b[tagLen:] + + var valLen int + switch wtyp { + case protowire.VarintType: + _, valLen = protowire.ConsumeVarint(b) + case protowire.Fixed32Type: + _, valLen = protowire.ConsumeFixed32(b) + case protowire.Fixed64Type: + _, valLen = protowire.ConsumeFixed64(b) + case protowire.BytesType: + _, valLen = protowire.ConsumeBytes(b) + case protowire.StartGroupType: + depth++ + case protowire.EndGroupType: + depth-- + default: + return nil, 0, errors.New("proto: cannot parse reserved wire type") + } + if valLen < 0 { + return nil, 0, protowire.ParseError(valLen) + } + b = b[valLen:] + + if depth == 0 { + return b0[:len(b0)-len(b)-tagLen], len(b0) - len(b), nil + } + } +} diff --git a/vendor/github.com/golang/protobuf/proto/clone.go b/vendor/github.com/golang/protobuf/proto/clone.go deleted file mode 100644 index 3cd3249f7..000000000 --- a/vendor/github.com/golang/protobuf/proto/clone.go +++ /dev/null @@ -1,253 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. 
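A short sketch of the length-prefixed byte and string accessors above; EncodeRawBytes/EncodeStringBytes and their Decode counterparts share the same wire form (wire type 2). The payload values are arbitrary.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	b := proto.NewBuffer(nil)
	_ = b.EncodeRawBytes([]byte{0xDE, 0xAD}) // length prefix 2, then the payload
	_ = b.EncodeStringBytes("etcd")          // length prefix 4, then the payload

	raw, _ := b.DecodeRawBytes(true) // alloc=true copies out of the buffer
	s, _ := b.DecodeStringBytes()
	fmt.Printf("% x %q\n", raw, s) // de ad "etcd"
}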
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Protocol buffer deep copy and merge. -// TODO: RawMessage. - -package proto - -import ( - "fmt" - "log" - "reflect" - "strings" -) - -// Clone returns a deep copy of a protocol buffer. -func Clone(src Message) Message { - in := reflect.ValueOf(src) - if in.IsNil() { - return src - } - out := reflect.New(in.Type().Elem()) - dst := out.Interface().(Message) - Merge(dst, src) - return dst -} - -// Merger is the interface representing objects that can merge messages of the same type. -type Merger interface { - // Merge merges src into this message. - // Required and optional fields that are set in src will be set to that value in dst. - // Elements of repeated fields will be appended. - // - // Merge may panic if called with a different argument type than the receiver. - Merge(src Message) -} - -// generatedMerger is the custom merge method that generated protos will have. -// We must add this method since a generate Merge method will conflict with -// many existing protos that have a Merge data field already defined. -type generatedMerger interface { - XXX_Merge(src Message) -} - -// Merge merges src into dst. -// Required and optional fields that are set in src will be set to that value in dst. -// Elements of repeated fields will be appended. -// Merge panics if src and dst are not the same type, or if dst is nil. 
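Although clone.go is deleted here, proto.Clone and proto.Merge remain part of the package's public API in the rewritten library; a minimal sketch under that assumption, using the well-known StringValue wrapper from github.com/golang/protobuf/ptypes/wrappers (also assumed to be vendored):

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/ptypes/wrappers"
)

func main() {
	src := &wrappers.StringValue{Value: "druid"}

	// Clone returns a deep copy that can be mutated independently of src.
	dst := proto.Clone(src).(*wrappers.StringValue)
	dst.Value = "etcd"
	fmt.Println(src.Value, dst.Value) // druid etcd

	// Merge copies the fields that are set in src into an existing message.
	merged := &wrappers.StringValue{}
	proto.Merge(merged, src)
	fmt.Println(merged.Value) // druid
}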
-func Merge(dst, src Message) { - if m, ok := dst.(Merger); ok { - m.Merge(src) - return - } - - in := reflect.ValueOf(src) - out := reflect.ValueOf(dst) - if out.IsNil() { - panic("proto: nil destination") - } - if in.Type() != out.Type() { - panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src)) - } - if in.IsNil() { - return // Merge from nil src is a noop - } - if m, ok := dst.(generatedMerger); ok { - m.XXX_Merge(src) - return - } - mergeStruct(out.Elem(), in.Elem()) -} - -func mergeStruct(out, in reflect.Value) { - sprop := GetProperties(in.Type()) - for i := 0; i < in.NumField(); i++ { - f := in.Type().Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) - } - - if emIn, err := extendable(in.Addr().Interface()); err == nil { - emOut, _ := extendable(out.Addr().Interface()) - mIn, muIn := emIn.extensionsRead() - if mIn != nil { - mOut := emOut.extensionsWrite() - muIn.Lock() - mergeExtension(mOut, mIn) - muIn.Unlock() - } - } - - uf := in.FieldByName("XXX_unrecognized") - if !uf.IsValid() { - return - } - uin := uf.Bytes() - if len(uin) > 0 { - out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...)) - } -} - -// mergeAny performs a merge between two values of the same type. -// viaPtr indicates whether the values were indirected through a pointer (implying proto2). -// prop is set if this is a struct field (it may be nil). -func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { - if in.Type() == protoMessageType { - if !in.IsNil() { - if out.IsNil() { - out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) - } else { - Merge(out.Interface().(Message), in.Interface().(Message)) - } - } - return - } - switch in.Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, - reflect.String, reflect.Uint32, reflect.Uint64: - if !viaPtr && isProto3Zero(in) { - return - } - out.Set(in) - case reflect.Interface: - // Probably a oneof field; copy non-nil values. - if in.IsNil() { - return - } - // Allocate destination if it is not set, or set to a different type. - // Otherwise we will merge as normal. - if out.IsNil() || out.Elem().Type() != in.Elem().Type() { - out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T) - } - mergeAny(out.Elem(), in.Elem(), false, nil) - case reflect.Map: - if in.Len() == 0 { - return - } - if out.IsNil() { - out.Set(reflect.MakeMap(in.Type())) - } - // For maps with value types of *T or []byte we need to deep copy each value. - elemKind := in.Type().Elem().Kind() - for _, key := range in.MapKeys() { - var val reflect.Value - switch elemKind { - case reflect.Ptr: - val = reflect.New(in.Type().Elem().Elem()) - mergeAny(val, in.MapIndex(key), false, nil) - case reflect.Slice: - val = in.MapIndex(key) - val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) - default: - val = in.MapIndex(key) - } - out.SetMapIndex(key, val) - } - case reflect.Ptr: - if in.IsNil() { - return - } - if out.IsNil() { - out.Set(reflect.New(in.Elem().Type())) - } - mergeAny(out.Elem(), in.Elem(), true, nil) - case reflect.Slice: - if in.IsNil() { - return - } - if in.Type().Elem().Kind() == reflect.Uint8 { - // []byte is a scalar bytes field, not a repeated field. - - // Edge case: if this is in a proto3 message, a zero length - // bytes field is considered the zero value, and should not - // be merged. - if prop != nil && prop.proto3 && in.Len() == 0 { - return - } - - // Make a deep copy. 
- // Append to []byte{} instead of []byte(nil) so that we never end up - // with a nil result. - out.SetBytes(append([]byte{}, in.Bytes()...)) - return - } - n := in.Len() - if out.IsNil() { - out.Set(reflect.MakeSlice(in.Type(), 0, n)) - } - switch in.Type().Elem().Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, - reflect.String, reflect.Uint32, reflect.Uint64: - out.Set(reflect.AppendSlice(out, in)) - default: - for i := 0; i < n; i++ { - x := reflect.Indirect(reflect.New(in.Type().Elem())) - mergeAny(x, in.Index(i), false, nil) - out.Set(reflect.Append(out, x)) - } - } - case reflect.Struct: - mergeStruct(out, in) - default: - // unknown type, so not a protocol buffer - log.Printf("proto: don't know how to copy %v", in) - } -} - -func mergeExtension(out, in map[int32]Extension) { - for extNum, eIn := range in { - eOut := Extension{desc: eIn.desc} - if eIn.value != nil { - v := reflect.New(reflect.TypeOf(eIn.value)).Elem() - mergeAny(v, reflect.ValueOf(eIn.value), false, nil) - eOut.value = v.Interface() - } - if eIn.enc != nil { - eOut.enc = make([]byte, len(eIn.enc)) - copy(eOut.enc, eIn.enc) - } - - out[extNum] = eOut - } -} diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go deleted file mode 100644 index 63b0f08be..000000000 --- a/vendor/github.com/golang/protobuf/proto/decode.go +++ /dev/null @@ -1,427 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for decoding protocol buffer data to construct in-memory representations. - */ - -import ( - "errors" - "fmt" - "io" -) - -// errOverflow is returned when an integer is too large to be represented. -var errOverflow = errors.New("proto: integer overflow") - -// ErrInternalBadWireType is returned by generated code when an incorrect -// wire type is encountered. 
It does not get returned to user code. -var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") - -// DecodeVarint reads a varint-encoded integer from the slice. -// It returns the integer and the number of bytes consumed, or -// zero if there is not enough. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func DecodeVarint(buf []byte) (x uint64, n int) { - for shift := uint(0); shift < 64; shift += 7 { - if n >= len(buf) { - return 0, 0 - } - b := uint64(buf[n]) - n++ - x |= (b & 0x7F) << shift - if (b & 0x80) == 0 { - return x, n - } - } - - // The number is too large to represent in a 64-bit value. - return 0, 0 -} - -func (p *Buffer) decodeVarintSlow() (x uint64, err error) { - i := p.index - l := len(p.buf) - - for shift := uint(0); shift < 64; shift += 7 { - if i >= l { - err = io.ErrUnexpectedEOF - return - } - b := p.buf[i] - i++ - x |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - p.index = i - return - } - } - - // The number is too large to represent in a 64-bit value. - err = errOverflow - return -} - -// DecodeVarint reads a varint-encoded integer from the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func (p *Buffer) DecodeVarint() (x uint64, err error) { - i := p.index - buf := p.buf - - if i >= len(buf) { - return 0, io.ErrUnexpectedEOF - } else if buf[i] < 0x80 { - p.index++ - return uint64(buf[i]), nil - } else if len(buf)-i < 10 { - return p.decodeVarintSlow() - } - - var b uint64 - // we already checked the first byte - x = uint64(buf[i]) - 0x80 - i++ - - b = uint64(buf[i]) - i++ - x += b << 7 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 7 - - b = uint64(buf[i]) - i++ - x += b << 14 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 14 - - b = uint64(buf[i]) - i++ - x += b << 21 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 21 - - b = uint64(buf[i]) - i++ - x += b << 28 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 28 - - b = uint64(buf[i]) - i++ - x += b << 35 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 35 - - b = uint64(buf[i]) - i++ - x += b << 42 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 42 - - b = uint64(buf[i]) - i++ - x += b << 49 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 49 - - b = uint64(buf[i]) - i++ - x += b << 56 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 56 - - b = uint64(buf[i]) - i++ - x += b << 63 - if b&0x80 == 0 { - goto done - } - - return 0, errOverflow - -done: - p.index = i - return x, nil -} - -// DecodeFixed64 reads a 64-bit integer from the Buffer. -// This is the format for the -// fixed64, sfixed64, and double protocol buffer types. -func (p *Buffer) DecodeFixed64() (x uint64, err error) { - // x, err already 0 - i := p.index + 8 - if i < 0 || i > len(p.buf) { - err = io.ErrUnexpectedEOF - return - } - p.index = i - - x = uint64(p.buf[i-8]) - x |= uint64(p.buf[i-7]) << 8 - x |= uint64(p.buf[i-6]) << 16 - x |= uint64(p.buf[i-5]) << 24 - x |= uint64(p.buf[i-4]) << 32 - x |= uint64(p.buf[i-3]) << 40 - x |= uint64(p.buf[i-2]) << 48 - x |= uint64(p.buf[i-1]) << 56 - return -} - -// DecodeFixed32 reads a 32-bit integer from the Buffer. -// This is the format for the -// fixed32, sfixed32, and float protocol buffer types. 
-func (p *Buffer) DecodeFixed32() (x uint64, err error) { - // x, err already 0 - i := p.index + 4 - if i < 0 || i > len(p.buf) { - err = io.ErrUnexpectedEOF - return - } - p.index = i - - x = uint64(p.buf[i-4]) - x |= uint64(p.buf[i-3]) << 8 - x |= uint64(p.buf[i-2]) << 16 - x |= uint64(p.buf[i-1]) << 24 - return -} - -// DecodeZigzag64 reads a zigzag-encoded 64-bit integer -// from the Buffer. -// This is the format used for the sint64 protocol buffer type. -func (p *Buffer) DecodeZigzag64() (x uint64, err error) { - x, err = p.DecodeVarint() - if err != nil { - return - } - x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) - return -} - -// DecodeZigzag32 reads a zigzag-encoded 32-bit integer -// from the Buffer. -// This is the format used for the sint32 protocol buffer type. -func (p *Buffer) DecodeZigzag32() (x uint64, err error) { - x, err = p.DecodeVarint() - if err != nil { - return - } - x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) - return -} - -// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. -// This is the format used for the bytes protocol buffer -// type and for embedded messages. -func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { - n, err := p.DecodeVarint() - if err != nil { - return nil, err - } - - nb := int(n) - if nb < 0 { - return nil, fmt.Errorf("proto: bad byte length %d", nb) - } - end := p.index + nb - if end < p.index || end > len(p.buf) { - return nil, io.ErrUnexpectedEOF - } - - if !alloc { - // todo: check if can get more uses of alloc=false - buf = p.buf[p.index:end] - p.index += nb - return - } - - buf = make([]byte, nb) - copy(buf, p.buf[p.index:]) - p.index += nb - return -} - -// DecodeStringBytes reads an encoded string from the Buffer. -// This is the format used for the proto2 string type. -func (p *Buffer) DecodeStringBytes() (s string, err error) { - buf, err := p.DecodeRawBytes(false) - if err != nil { - return - } - return string(buf), nil -} - -// Unmarshaler is the interface representing objects that can -// unmarshal themselves. The argument points to data that may be -// overwritten, so implementations should not keep references to the -// buffer. -// Unmarshal implementations should not clear the receiver. -// Any unmarshaled data should be merged into the receiver. -// Callers of Unmarshal that do not want to retain existing data -// should Reset the receiver before calling Unmarshal. -type Unmarshaler interface { - Unmarshal([]byte) error -} - -// newUnmarshaler is the interface representing objects that can -// unmarshal themselves. The semantics are identical to Unmarshaler. -// -// This exists to support protoc-gen-go generated messages. -// The proto package will stop type-asserting to this interface in the future. -// -// DO NOT DEPEND ON THIS. -type newUnmarshaler interface { - XXX_Unmarshal([]byte) error -} - -// Unmarshal parses the protocol buffer representation in buf and places the -// decoded result in pb. If the struct underlying pb does not match -// the data in buf, the results can be unpredictable. -// -// Unmarshal resets pb before starting to unmarshal, so any -// existing data in pb is always removed. Use UnmarshalMerge -// to preserve and append to existing data. 
-func Unmarshal(buf []byte, pb Message) error { - pb.Reset() - if u, ok := pb.(newUnmarshaler); ok { - return u.XXX_Unmarshal(buf) - } - if u, ok := pb.(Unmarshaler); ok { - return u.Unmarshal(buf) - } - return NewBuffer(buf).Unmarshal(pb) -} - -// UnmarshalMerge parses the protocol buffer representation in buf and -// writes the decoded result to pb. If the struct underlying pb does not match -// the data in buf, the results can be unpredictable. -// -// UnmarshalMerge merges into existing data in pb. -// Most code should use Unmarshal instead. -func UnmarshalMerge(buf []byte, pb Message) error { - if u, ok := pb.(newUnmarshaler); ok { - return u.XXX_Unmarshal(buf) - } - if u, ok := pb.(Unmarshaler); ok { - // NOTE: The history of proto have unfortunately been inconsistent - // whether Unmarshaler should or should not implicitly clear itself. - // Some implementations do, most do not. - // Thus, calling this here may or may not do what people want. - // - // See https://github.com/golang/protobuf/issues/424 - return u.Unmarshal(buf) - } - return NewBuffer(buf).Unmarshal(pb) -} - -// DecodeMessage reads a count-delimited message from the Buffer. -func (p *Buffer) DecodeMessage(pb Message) error { - enc, err := p.DecodeRawBytes(false) - if err != nil { - return err - } - return NewBuffer(enc).Unmarshal(pb) -} - -// DecodeGroup reads a tag-delimited group from the Buffer. -// StartGroup tag is already consumed. This function consumes -// EndGroup tag. -func (p *Buffer) DecodeGroup(pb Message) error { - b := p.buf[p.index:] - x, y := findEndGroup(b) - if x < 0 { - return io.ErrUnexpectedEOF - } - err := Unmarshal(b[:x], pb) - p.index += y - return err -} - -// Unmarshal parses the protocol buffer representation in the -// Buffer and places the decoded result in pb. If the struct -// underlying pb does not match the data in the buffer, the results can be -// unpredictable. -// -// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. -func (p *Buffer) Unmarshal(pb Message) error { - // If the object can unmarshal itself, let it. - if u, ok := pb.(newUnmarshaler); ok { - err := u.XXX_Unmarshal(p.buf[p.index:]) - p.index = len(p.buf) - return err - } - if u, ok := pb.(Unmarshaler); ok { - // NOTE: The history of proto have unfortunately been inconsistent - // whether Unmarshaler should or should not implicitly clear itself. - // Some implementations do, most do not. - // Thus, calling this here may or may not do what people want. - // - // See https://github.com/golang/protobuf/issues/424 - err := u.Unmarshal(p.buf[p.index:]) - p.index = len(p.buf) - return err - } - - // Slow workaround for messages that aren't Unmarshalers. - // This includes some hand-coded .pb.go files and - // bootstrap protos. - // TODO: fix all of those and then add Unmarshal to - // the Message interface. Then: - // The cast above and code below can be deleted. - // The old unmarshaler can be deleted. - // Clients can call Unmarshal directly (can already do that, actually). - var info InternalMessageInfo - err := info.Unmarshal(pb, p.buf[p.index:]) - p.index = len(p.buf) - return err -} diff --git a/vendor/github.com/golang/protobuf/proto/defaults.go b/vendor/github.com/golang/protobuf/proto/defaults.go new file mode 100644 index 000000000..d399bf069 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/defaults.go @@ -0,0 +1,63 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package proto + +import ( + "google.golang.org/protobuf/reflect/protoreflect" +) + +// SetDefaults sets unpopulated scalar fields to their default values. +// Fields within a oneof are not set even if they have a default value. +// SetDefaults is recursively called upon any populated message fields. +func SetDefaults(m Message) { + if m != nil { + setDefaults(MessageReflect(m)) + } +} + +func setDefaults(m protoreflect.Message) { + fds := m.Descriptor().Fields() + for i := 0; i < fds.Len(); i++ { + fd := fds.Get(i) + if !m.Has(fd) { + if fd.HasDefault() && fd.ContainingOneof() == nil { + v := fd.Default() + if fd.Kind() == protoreflect.BytesKind { + v = protoreflect.ValueOf(append([]byte(nil), v.Bytes()...)) // copy the default bytes + } + m.Set(fd, v) + } + continue + } + } + + m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + switch { + // Handle singular message. + case fd.Cardinality() != protoreflect.Repeated: + if fd.Message() != nil { + setDefaults(m.Get(fd).Message()) + } + // Handle list of messages. + case fd.IsList(): + if fd.Message() != nil { + ls := m.Get(fd).List() + for i := 0; i < ls.Len(); i++ { + setDefaults(ls.Get(i).Message()) + } + } + // Handle map of messages. + case fd.IsMap(): + if fd.MapValue().Message() != nil { + ms := m.Get(fd).Map() + ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool { + setDefaults(v.Message()) + return true + }) + } + } + return true + }) +} diff --git a/vendor/github.com/golang/protobuf/proto/deprecated.go b/vendor/github.com/golang/protobuf/proto/deprecated.go index 35b882c09..e8db57e09 100644 --- a/vendor/github.com/golang/protobuf/proto/deprecated.go +++ b/vendor/github.com/golang/protobuf/proto/deprecated.go @@ -1,63 +1,113 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2018 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Copyright 2018 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. package proto -import "errors" +import ( + "encoding/json" + "errors" + "fmt" + "strconv" -// Deprecated: do not use. + protoV2 "google.golang.org/protobuf/proto" +) + +var ( + // Deprecated: No longer returned. + ErrNil = errors.New("proto: Marshal called with nil") + + // Deprecated: No longer returned. + ErrTooLarge = errors.New("proto: message encodes to over 2 GB") + + // Deprecated: No longer returned. + ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") +) + +// Deprecated: Do not use. type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 } -// Deprecated: do not use. +// Deprecated: Do not use. func GetStats() Stats { return Stats{} } -// Deprecated: do not use. +// Deprecated: Do not use. func MarshalMessageSet(interface{}) ([]byte, error) { return nil, errors.New("proto: not implemented") } -// Deprecated: do not use. +// Deprecated: Do not use. func UnmarshalMessageSet([]byte, interface{}) error { return errors.New("proto: not implemented") } -// Deprecated: do not use. +// Deprecated: Do not use. func MarshalMessageSetJSON(interface{}) ([]byte, error) { return nil, errors.New("proto: not implemented") } -// Deprecated: do not use. +// Deprecated: Do not use. func UnmarshalMessageSetJSON([]byte, interface{}) error { return errors.New("proto: not implemented") } -// Deprecated: do not use. +// Deprecated: Do not use. func RegisterMessageSetType(Message, int32, string) {} + +// Deprecated: Do not use. +func EnumName(m map[int32]string, v int32) string { + s, ok := m[v] + if ok { + return s + } + return strconv.Itoa(int(v)) +} + +// Deprecated: Do not use. +func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { + if data[0] == '"' { + // New style: enums are strings. + var repr string + if err := json.Unmarshal(data, &repr); err != nil { + return -1, err + } + val, ok := m[repr] + if !ok { + return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) + } + return val, nil + } + // Old style: enums are ints. + var val int32 + if err := json.Unmarshal(data, &val); err != nil { + return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) + } + return val, nil +} + +// Deprecated: Do not use; this type existed for intenal-use only. +type InternalMessageInfo struct{} + +// Deprecated: Do not use; this method existed for intenal-use only. +func (*InternalMessageInfo) DiscardUnknown(m Message) { + DiscardUnknown(m) +} + +// Deprecated: Do not use; this method existed for intenal-use only. +func (*InternalMessageInfo) Marshal(b []byte, m Message, deterministic bool) ([]byte, error) { + return protoV2.MarshalOptions{Deterministic: deterministic}.MarshalAppend(b, MessageV2(m)) +} + +// Deprecated: Do not use; this method existed for intenal-use only. +func (*InternalMessageInfo) Merge(dst, src Message) { + protoV2.Merge(MessageV2(dst), MessageV2(src)) +} + +// Deprecated: Do not use; this method existed for intenal-use only. +func (*InternalMessageInfo) Size(m Message) int { + return protoV2.Size(MessageV2(m)) +} + +// Deprecated: Do not use; this method existed for intenal-use only. 
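The deprecated enum shims above are kept only for old generated code; a runnable sketch of what they do (the Status name/value maps are made up for illustration):

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	// Name/value maps in the shape protoc-gen-go generates for an enum.
	names := map[int32]string{0: "UNKNOWN", 1: "ACTIVE"}
	values := map[string]int32{"UNKNOWN": 0, "ACTIVE": 1}

	fmt.Println(proto.EnumName(names, 1)) // ACTIVE
	fmt.Println(proto.EnumName(names, 7)) // 7 (unknown numbers fall back to digits)

	// JSON may carry an enum either as its name or as its number.
	v1, _ := proto.UnmarshalJSONEnum(values, []byte(`"ACTIVE"`), "Status")
	v2, _ := proto.UnmarshalJSONEnum(values, []byte(`1`), "Status")
	fmt.Println(v1, v2) // 1 1
}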
+func (*InternalMessageInfo) Unmarshal(m Message, b []byte) error { + return protoV2.UnmarshalOptions{Merge: true}.Unmarshal(b, MessageV2(m)) +} diff --git a/vendor/github.com/golang/protobuf/proto/discard.go b/vendor/github.com/golang/protobuf/proto/discard.go index dea2617ce..2187e877f 100644 --- a/vendor/github.com/golang/protobuf/proto/discard.go +++ b/vendor/github.com/golang/protobuf/proto/discard.go @@ -1,48 +1,13 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2017 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. package proto import ( - "fmt" - "reflect" - "strings" - "sync" - "sync/atomic" + "google.golang.org/protobuf/reflect/protoreflect" ) -type generatedDiscarder interface { - XXX_DiscardUnknown() -} - // DiscardUnknown recursively discards all unknown fields from this message // and all embedded messages. // @@ -51,300 +16,43 @@ type generatedDiscarder interface { // marshal to be able to produce a message that continues to have those // unrecognized fields. To avoid this, DiscardUnknown is used to // explicitly clear the unknown fields after unmarshaling. -// -// For proto2 messages, the unknown fields of message extensions are only -// discarded from messages that have been accessed via GetExtension. func DiscardUnknown(m Message) { - if m, ok := m.(generatedDiscarder); ok { - m.XXX_DiscardUnknown() - return - } - // TODO: Dynamically populate a InternalMessageInfo for legacy messages, - // but the master branch has no implementation for InternalMessageInfo, - // so it would be more work to replicate that approach. - discardLegacy(m) -} - -// DiscardUnknown recursively discards all unknown fields. 
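A sketch of the retained-versus-discarded behavior described in the DiscardUnknown comment above, assuming the empty.Empty well-known type (github.com/golang/protobuf/ptypes/empty) is vendored as well:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/ptypes/empty"
)

func main() {
	// Field number 1 (varint wire type) with value 5; Empty declares no fields,
	// so unmarshaling keeps these bytes as unknown fields.
	raw := []byte{0x08, 0x05}

	m := &empty.Empty{}
	if err := proto.Unmarshal(raw, m); err != nil {
		panic(err)
	}

	out, _ := proto.Marshal(m)
	fmt.Println(len(out)) // 2: unknown fields are re-emitted by Marshal

	proto.DiscardUnknown(m)
	out, _ = proto.Marshal(m)
	fmt.Println(len(out)) // 0: the unknown field has been dropped
}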
-func (a *InternalMessageInfo) DiscardUnknown(m Message) { - di := atomicLoadDiscardInfo(&a.discard) - if di == nil { - di = getDiscardInfo(reflect.TypeOf(m).Elem()) - atomicStoreDiscardInfo(&a.discard, di) - } - di.discard(toPointer(&m)) -} - -type discardInfo struct { - typ reflect.Type - - initialized int32 // 0: only typ is valid, 1: everything is valid - lock sync.Mutex - - fields []discardFieldInfo - unrecognized field -} - -type discardFieldInfo struct { - field field // Offset of field, guaranteed to be valid - discard func(src pointer) -} - -var ( - discardInfoMap = map[reflect.Type]*discardInfo{} - discardInfoLock sync.Mutex -) - -func getDiscardInfo(t reflect.Type) *discardInfo { - discardInfoLock.Lock() - defer discardInfoLock.Unlock() - di := discardInfoMap[t] - if di == nil { - di = &discardInfo{typ: t} - discardInfoMap[t] = di + if m != nil { + discardUnknown(MessageReflect(m)) } - return di } -func (di *discardInfo) discard(src pointer) { - if src.isNil() { - return // Nothing to do. - } - - if atomic.LoadInt32(&di.initialized) == 0 { - di.computeDiscardInfo() - } - - for _, fi := range di.fields { - sfp := src.offset(fi.field) - fi.discard(sfp) - } - - // For proto2 messages, only discard unknown fields in message extensions - // that have been accessed via GetExtension. - if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil { - // Ignore lock since DiscardUnknown is not concurrency safe. - emm, _ := em.extensionsRead() - for _, mx := range emm { - if m, ok := mx.value.(Message); ok { - DiscardUnknown(m) +func discardUnknown(m protoreflect.Message) { + m.Range(func(fd protoreflect.FieldDescriptor, val protoreflect.Value) bool { + switch { + // Handle singular message. + case fd.Cardinality() != protoreflect.Repeated: + if fd.Message() != nil { + discardUnknown(m.Get(fd).Message()) } - } - } - - if di.unrecognized.IsValid() { - *src.offset(di.unrecognized).toBytes() = nil - } -} - -func (di *discardInfo) computeDiscardInfo() { - di.lock.Lock() - defer di.lock.Unlock() - if di.initialized != 0 { - return - } - t := di.typ - n := t.NumField() - - for i := 0; i < n; i++ { - f := t.Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - - dfi := discardFieldInfo{field: toField(&f)} - tf := f.Type - - // Unwrap tf to get its most basic type. - var isPointer, isSlice bool - if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { - isSlice = true - tf = tf.Elem() - } - if tf.Kind() == reflect.Ptr { - isPointer = true - tf = tf.Elem() - } - if isPointer && isSlice && tf.Kind() != reflect.Struct { - panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name)) - } - - switch tf.Kind() { - case reflect.Struct: - switch { - case !isPointer: - panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name)) - case isSlice: // E.g., []*pb.T - di := getDiscardInfo(tf) - dfi.discard = func(src pointer) { - sps := src.getPointerSlice() - for _, sp := range sps { - if !sp.isNil() { - di.discard(sp) - } - } - } - default: // E.g., *pb.T - di := getDiscardInfo(tf) - dfi.discard = func(src pointer) { - sp := src.getPointer() - if !sp.isNil() { - di.discard(sp) - } + // Handle list of messages. 
+ case fd.IsList(): + if fd.Message() != nil { + ls := m.Get(fd).List() + for i := 0; i < ls.Len(); i++ { + discardUnknown(ls.Get(i).Message()) } } - case reflect.Map: - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name)) - default: // E.g., map[K]V - if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T) - dfi.discard = func(src pointer) { - sm := src.asPointerTo(tf).Elem() - if sm.Len() == 0 { - return - } - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - DiscardUnknown(val.Interface().(Message)) - } - } - } else { - dfi.discard = func(pointer) {} // Noop - } - } - case reflect.Interface: - // Must be oneof field. - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name)) - default: // E.g., interface{} - // TODO: Make this faster? - dfi.discard = func(src pointer) { - su := src.asPointerTo(tf).Elem() - if !su.IsNil() { - sv := su.Elem().Elem().Field(0) - if sv.Kind() == reflect.Ptr && sv.IsNil() { - return - } - switch sv.Type().Kind() { - case reflect.Ptr: // Proto struct (e.g., *T) - DiscardUnknown(sv.Interface().(Message)) - } - } - } + // Handle map of messages. + case fd.IsMap(): + if fd.MapValue().Message() != nil { + ms := m.Get(fd).Map() + ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool { + discardUnknown(v.Message()) + return true + }) } - default: - continue - } - di.fields = append(di.fields, dfi) - } - - di.unrecognized = invalidField - if f, ok := t.FieldByName("XXX_unrecognized"); ok { - if f.Type != reflect.TypeOf([]byte{}) { - panic("expected XXX_unrecognized to be of type []byte") - } - di.unrecognized = toField(&f) - } - - atomic.StoreInt32(&di.initialized, 1) -} - -func discardLegacy(m Message) { - v := reflect.ValueOf(m) - if v.Kind() != reflect.Ptr || v.IsNil() { - return - } - v = v.Elem() - if v.Kind() != reflect.Struct { - return - } - t := v.Type() - - for i := 0; i < v.NumField(); i++ { - f := t.Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue } - vf := v.Field(i) - tf := f.Type + return true + }) - // Unwrap tf to get its most basic type. - var isPointer, isSlice bool - if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { - isSlice = true - tf = tf.Elem() - } - if tf.Kind() == reflect.Ptr { - isPointer = true - tf = tf.Elem() - } - if isPointer && isSlice && tf.Kind() != reflect.Struct { - panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name)) - } - - switch tf.Kind() { - case reflect.Struct: - switch { - case !isPointer: - panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name)) - case isSlice: // E.g., []*pb.T - for j := 0; j < vf.Len(); j++ { - discardLegacy(vf.Index(j).Interface().(Message)) - } - default: // E.g., *pb.T - discardLegacy(vf.Interface().(Message)) - } - case reflect.Map: - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name)) - default: // E.g., map[K]V - tv := vf.Type().Elem() - if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T) - for _, key := range vf.MapKeys() { - val := vf.MapIndex(key) - discardLegacy(val.Interface().(Message)) - } - } - } - case reflect.Interface: - // Must be oneof field. 
- switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name)) - default: // E.g., test_proto.isCommunique_Union interface - if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" { - vf = vf.Elem() // E.g., *test_proto.Communique_Msg - if !vf.IsNil() { - vf = vf.Elem() // E.g., test_proto.Communique_Msg - vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value - if vf.Kind() == reflect.Ptr { - discardLegacy(vf.Interface().(Message)) - } - } - } - } - } - } - - if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() { - if vf.Type() != reflect.TypeOf([]byte{}) { - panic("expected XXX_unrecognized to be of type []byte") - } - vf.Set(reflect.ValueOf([]byte(nil))) - } - - // For proto2 messages, only discard unknown fields in message extensions - // that have been accessed via GetExtension. - if em, err := extendable(m); err == nil { - // Ignore lock since discardLegacy is not concurrency safe. - emm, _ := em.extensionsRead() - for _, mx := range emm { - if m, ok := mx.value.(Message); ok { - discardLegacy(m) - } - } + // Discard unknown fields. + if len(m.GetUnknown()) > 0 { + m.SetUnknown(nil) } } diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go deleted file mode 100644 index 3abfed2cf..000000000 --- a/vendor/github.com/golang/protobuf/proto/encode.go +++ /dev/null @@ -1,203 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for encoding data into the wire format for protocol buffers. - */ - -import ( - "errors" - "reflect" -) - -var ( - // errRepeatedHasNil is the error returned if Marshal is called with - // a struct with a repeated field containing a nil element. 
- errRepeatedHasNil = errors.New("proto: repeated field has nil element") - - // errOneofHasNil is the error returned if Marshal is called with - // a struct with a oneof field containing a nil element. - errOneofHasNil = errors.New("proto: oneof field has nil value") - - // ErrNil is the error returned if Marshal is called with nil. - ErrNil = errors.New("proto: Marshal called with nil") - - // ErrTooLarge is the error returned if Marshal is called with a - // message that encodes to >2GB. - ErrTooLarge = errors.New("proto: message encodes to over 2 GB") -) - -// The fundamental encoders that put bytes on the wire. -// Those that take integer types all accept uint64 and are -// therefore of type valueEncoder. - -const maxVarintBytes = 10 // maximum length of a varint - -// EncodeVarint returns the varint encoding of x. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -// Not used by the package itself, but helpful to clients -// wishing to use the same encoding. -func EncodeVarint(x uint64) []byte { - var buf [maxVarintBytes]byte - var n int - for n = 0; x > 127; n++ { - buf[n] = 0x80 | uint8(x&0x7F) - x >>= 7 - } - buf[n] = uint8(x) - n++ - return buf[0:n] -} - -// EncodeVarint writes a varint-encoded integer to the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func (p *Buffer) EncodeVarint(x uint64) error { - for x >= 1<<7 { - p.buf = append(p.buf, uint8(x&0x7f|0x80)) - x >>= 7 - } - p.buf = append(p.buf, uint8(x)) - return nil -} - -// SizeVarint returns the varint encoding size of an integer. -func SizeVarint(x uint64) int { - switch { - case x < 1<<7: - return 1 - case x < 1<<14: - return 2 - case x < 1<<21: - return 3 - case x < 1<<28: - return 4 - case x < 1<<35: - return 5 - case x < 1<<42: - return 6 - case x < 1<<49: - return 7 - case x < 1<<56: - return 8 - case x < 1<<63: - return 9 - } - return 10 -} - -// EncodeFixed64 writes a 64-bit integer to the Buffer. -// This is the format for the -// fixed64, sfixed64, and double protocol buffer types. -func (p *Buffer) EncodeFixed64(x uint64) error { - p.buf = append(p.buf, - uint8(x), - uint8(x>>8), - uint8(x>>16), - uint8(x>>24), - uint8(x>>32), - uint8(x>>40), - uint8(x>>48), - uint8(x>>56)) - return nil -} - -// EncodeFixed32 writes a 32-bit integer to the Buffer. -// This is the format for the -// fixed32, sfixed32, and float protocol buffer types. -func (p *Buffer) EncodeFixed32(x uint64) error { - p.buf = append(p.buf, - uint8(x), - uint8(x>>8), - uint8(x>>16), - uint8(x>>24)) - return nil -} - -// EncodeZigzag64 writes a zigzag-encoded 64-bit integer -// to the Buffer. -// This is the format used for the sint64 protocol buffer type. -func (p *Buffer) EncodeZigzag64(x uint64) error { - // use signed number to get arithmetic right shift. - return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} - -// EncodeZigzag32 writes a zigzag-encoded 32-bit integer -// to the Buffer. -// This is the format used for the sint32 protocol buffer type. -func (p *Buffer) EncodeZigzag32(x uint64) error { - // use signed number to get arithmetic right shift. - return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) -} - -// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. -// This is the format used for the bytes protocol buffer -// type and for embedded messages. 
-func (p *Buffer) EncodeRawBytes(b []byte) error { - p.EncodeVarint(uint64(len(b))) - p.buf = append(p.buf, b...) - return nil -} - -// EncodeStringBytes writes an encoded string to the Buffer. -// This is the format used for the proto2 string type. -func (p *Buffer) EncodeStringBytes(s string) error { - p.EncodeVarint(uint64(len(s))) - p.buf = append(p.buf, s...) - return nil -} - -// Marshaler is the interface representing objects that can marshal themselves. -type Marshaler interface { - Marshal() ([]byte, error) -} - -// EncodeMessage writes the protocol buffer to the Buffer, -// prefixed by a varint-encoded length. -func (p *Buffer) EncodeMessage(pb Message) error { - siz := Size(pb) - p.EncodeVarint(uint64(siz)) - return p.Marshal(pb) -} - -// All protocol buffer fields are nillable, but be careful. -func isNil(v reflect.Value) bool { - switch v.Kind() { - case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return v.IsNil() - } - return false -} diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go deleted file mode 100644 index f9b6e41b3..000000000 --- a/vendor/github.com/golang/protobuf/proto/equal.go +++ /dev/null @@ -1,301 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Protocol buffer comparison. - -package proto - -import ( - "bytes" - "log" - "reflect" - "strings" -) - -/* -Equal returns true iff protocol buffers a and b are equal. -The arguments must both be pointers to protocol buffer structs. - -Equality is defined in this way: - - Two messages are equal iff they are the same type, - corresponding fields are equal, unknown field sets - are equal, and extensions sets are equal. - - Two set scalar fields are equal iff their values are equal. - If the fields are of a floating-point type, remember that - NaN != x for all x, including NaN. 
If the message is defined - in a proto3 .proto file, fields are not "set"; specifically, - zero length proto3 "bytes" fields are equal (nil == {}). - - Two repeated fields are equal iff their lengths are the same, - and their corresponding elements are equal. Note a "bytes" field, - although represented by []byte, is not a repeated field and the - rule for the scalar fields described above applies. - - Two unset fields are equal. - - Two unknown field sets are equal if their current - encoded state is equal. - - Two extension sets are equal iff they have corresponding - elements that are pairwise equal. - - Two map fields are equal iff their lengths are the same, - and they contain the same set of elements. Zero-length map - fields are equal. - - Every other combination of things are not equal. - -The return value is undefined if a and b are not protocol buffers. -*/ -func Equal(a, b Message) bool { - if a == nil || b == nil { - return a == b - } - v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b) - if v1.Type() != v2.Type() { - return false - } - if v1.Kind() == reflect.Ptr { - if v1.IsNil() { - return v2.IsNil() - } - if v2.IsNil() { - return false - } - v1, v2 = v1.Elem(), v2.Elem() - } - if v1.Kind() != reflect.Struct { - return false - } - return equalStruct(v1, v2) -} - -// v1 and v2 are known to have the same type. -func equalStruct(v1, v2 reflect.Value) bool { - sprop := GetProperties(v1.Type()) - for i := 0; i < v1.NumField(); i++ { - f := v1.Type().Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - f1, f2 := v1.Field(i), v2.Field(i) - if f.Type.Kind() == reflect.Ptr { - if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { - // both unset - continue - } else if n1 != n2 { - // set/unset mismatch - return false - } - f1, f2 = f1.Elem(), f2.Elem() - } - if !equalAny(f1, f2, sprop.Prop[i]) { - return false - } - } - - if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() { - em2 := v2.FieldByName("XXX_InternalExtensions") - if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) { - return false - } - } - - if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { - em2 := v2.FieldByName("XXX_extensions") - if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { - return false - } - } - - uf := v1.FieldByName("XXX_unrecognized") - if !uf.IsValid() { - return true - } - - u1 := uf.Bytes() - u2 := v2.FieldByName("XXX_unrecognized").Bytes() - return bytes.Equal(u1, u2) -} - -// v1 and v2 are known to have the same type. -// prop may be nil. -func equalAny(v1, v2 reflect.Value, prop *Properties) bool { - if v1.Type() == protoMessageType { - m1, _ := v1.Interface().(Message) - m2, _ := v2.Interface().(Message) - return Equal(m1, m2) - } - switch v1.Kind() { - case reflect.Bool: - return v1.Bool() == v2.Bool() - case reflect.Float32, reflect.Float64: - return v1.Float() == v2.Float() - case reflect.Int32, reflect.Int64: - return v1.Int() == v2.Int() - case reflect.Interface: - // Probably a oneof field; compare the inner values. - n1, n2 := v1.IsNil(), v2.IsNil() - if n1 || n2 { - return n1 == n2 - } - e1, e2 := v1.Elem(), v2.Elem() - if e1.Type() != e2.Type() { - return false - } - return equalAny(e1, e2, nil) - case reflect.Map: - if v1.Len() != v2.Len() { - return false - } - for _, key := range v1.MapKeys() { - val2 := v2.MapIndex(key) - if !val2.IsValid() { - // This key was not found in the second map. 
- return false - } - if !equalAny(v1.MapIndex(key), val2, nil) { - return false - } - } - return true - case reflect.Ptr: - // Maps may have nil values in them, so check for nil. - if v1.IsNil() && v2.IsNil() { - return true - } - if v1.IsNil() != v2.IsNil() { - return false - } - return equalAny(v1.Elem(), v2.Elem(), prop) - case reflect.Slice: - if v1.Type().Elem().Kind() == reflect.Uint8 { - // short circuit: []byte - - // Edge case: if this is in a proto3 message, a zero length - // bytes field is considered the zero value. - if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 { - return true - } - if v1.IsNil() != v2.IsNil() { - return false - } - return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) - } - - if v1.Len() != v2.Len() { - return false - } - for i := 0; i < v1.Len(); i++ { - if !equalAny(v1.Index(i), v2.Index(i), prop) { - return false - } - } - return true - case reflect.String: - return v1.Interface().(string) == v2.Interface().(string) - case reflect.Struct: - return equalStruct(v1, v2) - case reflect.Uint32, reflect.Uint64: - return v1.Uint() == v2.Uint() - } - - // unknown type, so not a protocol buffer - log.Printf("proto: don't know how to compare %v", v1) - return false -} - -// base is the struct type that the extensions are based on. -// x1 and x2 are InternalExtensions. -func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool { - em1, _ := x1.extensionsRead() - em2, _ := x2.extensionsRead() - return equalExtMap(base, em1, em2) -} - -func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { - if len(em1) != len(em2) { - return false - } - - for extNum, e1 := range em1 { - e2, ok := em2[extNum] - if !ok { - return false - } - - m1 := extensionAsLegacyType(e1.value) - m2 := extensionAsLegacyType(e2.value) - - if m1 == nil && m2 == nil { - // Both have only encoded form. - if bytes.Equal(e1.enc, e2.enc) { - continue - } - // The bytes are different, but the extensions might still be - // equal. We need to decode them to compare. - } - - if m1 != nil && m2 != nil { - // Both are unencoded. - if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { - return false - } - continue - } - - // At least one is encoded. To do a semantically correct comparison - // we need to unmarshal them first. - var desc *ExtensionDesc - if m := extensionMaps[base]; m != nil { - desc = m[extNum] - } - if desc == nil { - // If both have only encoded form and the bytes are the same, - // it is handled above. We get here when the bytes are different. - // We don't know how to decode it, so just compare them as byte - // slices. - log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) - return false - } - var err error - if m1 == nil { - m1, err = decodeExtension(e1.enc, desc) - } - if m2 == nil && err == nil { - m2, err = decodeExtension(e2.enc, desc) - } - if err != nil { - // The encoded form is invalid. 
- log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) - return false - } - if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { - return false - } - } - - return true -} diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go index fa88add30..42fc120c9 100644 --- a/vendor/github.com/golang/protobuf/proto/extensions.go +++ b/vendor/github.com/golang/protobuf/proto/extensions.go @@ -1,607 +1,356 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. package proto -/* - * Types and routines for supporting protocol buffer extensions. - */ - import ( "errors" "fmt" - "io" "reflect" - "strconv" - "sync" -) - -// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. -var ErrMissingExtension = errors.New("proto: missing extension") - -// ExtensionRange represents a range of message extensions for a protocol buffer. -// Used in code generated by the protocol compiler. -type ExtensionRange struct { - Start, End int32 // both inclusive -} - -// extendableProto is an interface implemented by any protocol buffer generated by the current -// proto compiler that may be extended. -type extendableProto interface { - Message - ExtensionRangeArray() []ExtensionRange - extensionsWrite() map[int32]Extension - extensionsRead() (map[int32]Extension, sync.Locker) -} - -// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous -// version of the proto compiler that may be extended. 
-type extendableProtoV1 interface { - Message - ExtensionRangeArray() []ExtensionRange - ExtensionMap() map[int32]Extension -} -// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto. -type extensionAdapter struct { - extendableProtoV1 -} + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/runtime/protoimpl" +) -func (e extensionAdapter) extensionsWrite() map[int32]Extension { - return e.ExtensionMap() -} +type ( + // ExtensionDesc represents an extension descriptor and + // is used to interact with an extension field in a message. + // + // Variables of this type are generated in code by protoc-gen-go. + ExtensionDesc = protoimpl.ExtensionInfo -func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { - return e.ExtensionMap(), notLocker{} -} + // ExtensionRange represents a range of message extensions. + // Used in code generated by protoc-gen-go. + ExtensionRange = protoiface.ExtensionRangeV1 -// notLocker is a sync.Locker whose Lock and Unlock methods are nops. -type notLocker struct{} + // Deprecated: Do not use; this is an internal type. + Extension = protoimpl.ExtensionFieldV1 -func (n notLocker) Lock() {} -func (n notLocker) Unlock() {} + // Deprecated: Do not use; this is an internal type. + XXX_InternalExtensions = protoimpl.ExtensionFields +) -// extendable returns the extendableProto interface for the given generated proto message. -// If the proto message has the old extension format, it returns a wrapper that implements -// the extendableProto interface. -func extendable(p interface{}) (extendableProto, error) { - switch p := p.(type) { - case extendableProto: - if isNilPtr(p) { - return nil, fmt.Errorf("proto: nil %T is not extendable", p) - } - return p, nil - case extendableProtoV1: - if isNilPtr(p) { - return nil, fmt.Errorf("proto: nil %T is not extendable", p) - } - return extensionAdapter{p}, nil - } - // Don't allocate a specific error containing %T: - // this is the hot path for Clone and MarshalText. - return nil, errNotExtendable -} +// ErrMissingExtension reports whether the extension was not present. +var ErrMissingExtension = errors.New("proto: missing extension") var errNotExtendable = errors.New("proto: not an extendable proto.Message") -func isNilPtr(x interface{}) bool { - v := reflect.ValueOf(x) - return v.Kind() == reflect.Ptr && v.IsNil() -} - -// XXX_InternalExtensions is an internal representation of proto extensions. -// -// Each generated message struct type embeds an anonymous XXX_InternalExtensions field, -// thus gaining the unexported 'extensions' method, which can be called only from the proto package. -// -// The methods of XXX_InternalExtensions are not concurrency safe in general, -// but calls to logically read-only methods such as has and get may be executed concurrently. -type XXX_InternalExtensions struct { - // The struct must be indirect so that if a user inadvertently copies a - // generated message and its embedded XXX_InternalExtensions, they - // avoid the mayhem of a copied mutex. - // - // The mutex serializes all logically read-only operations to p.extensionMap. - // It is up to the client to ensure that write operations to p.extensionMap are - // mutually exclusive with other accesses. 
- p *struct { - mu sync.Mutex - extensionMap map[int32]Extension +// HasExtension reports whether the extension field is present in m +// either as an explicitly populated field or as an unknown field. +func HasExtension(m Message, xt *ExtensionDesc) (has bool) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() { + return false } -} -// extensionsWrite returns the extension map, creating it on first use. -func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension { - if e.p == nil { - e.p = new(struct { - mu sync.Mutex - extensionMap map[int32]Extension + // Check whether any populated known field matches the field number. + xtd := xt.TypeDescriptor() + if isValidExtension(mr.Descriptor(), xtd) { + has = mr.Has(xtd) + } else { + mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool { + has = int32(fd.Number()) == xt.Field + return !has }) - e.p.extensionMap = make(map[int32]Extension) } - return e.p.extensionMap -} -// extensionsRead returns the extensions map for read-only use. It may be nil. -// The caller must hold the returned mutex's lock when accessing Elements within the map. -func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) { - if e.p == nil { - return nil, nil + // Check whether any unknown field matches the field number. + for b := mr.GetUnknown(); !has && len(b) > 0; { + num, _, n := protowire.ConsumeField(b) + has = int32(num) == xt.Field + b = b[n:] } - return e.p.extensionMap, &e.p.mu -} - -// ExtensionDesc represents an extension specification. -// Used in generated code from the protocol compiler. -type ExtensionDesc struct { - ExtendedType Message // nil pointer to the type that is being extended - ExtensionType interface{} // nil pointer to the extension type - Field int32 // field number - Name string // fully-qualified name of extension, for text formatting - Tag string // protobuf tag style - Filename string // name of the file in which the extension is defined -} - -func (ed *ExtensionDesc) repeated() bool { - t := reflect.TypeOf(ed.ExtensionType) - return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 -} - -// Extension represents an extension in a message. -type Extension struct { - // When an extension is stored in a message using SetExtension - // only desc and value are set. When the message is marshaled - // enc will be set to the encoded form of the message. - // - // When a message is unmarshaled and contains extensions, each - // extension will have only enc set. When such an extension is - // accessed using GetExtension (or GetExtensions) desc and value - // will be set. - desc *ExtensionDesc - - // value is a concrete value for the extension field. Let the type of - // desc.ExtensionType be the "API type" and the type of Extension.value - // be the "storage type". The API type and storage type are the same except: - // * For scalars (except []byte), the API type uses *T, - // while the storage type uses T. - // * For repeated fields, the API type uses []T, while the storage type - // uses *[]T. - // - // The reason for the divergence is so that the storage type more naturally - // matches what is expected of when retrieving the values through the - // protobuf reflection APIs. - // - // The value may only be populated if desc is also populated. - value interface{} - - // enc is the raw bytes for the extension field. - enc []byte + return has } -// SetRawExtension is for testing only. 
-func SetRawExtension(base Message, id int32, b []byte) { - epb, err := extendable(base) - if err != nil { +// ClearExtension removes the extension field from m +// either as an explicitly populated field or as an unknown field. +func ClearExtension(m Message, xt *ExtensionDesc) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() { return } - extmap := epb.extensionsWrite() - extmap[id] = Extension{enc: b} -} -// isExtensionField returns true iff the given field number is in an extension range. -func isExtensionField(pb extendableProto, field int32) bool { - for _, er := range pb.ExtensionRangeArray() { - if er.Start <= field && field <= er.End { + xtd := xt.TypeDescriptor() + if isValidExtension(mr.Descriptor(), xtd) { + mr.Clear(xtd) + } else { + mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool { + if int32(fd.Number()) == xt.Field { + mr.Clear(fd) + return false + } return true - } - } - return false -} - -// checkExtensionTypes checks that the given extension is valid for pb. -func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { - var pbi interface{} = pb - // Check the extended type. - if ea, ok := pbi.(extensionAdapter); ok { - pbi = ea.extendableProtoV1 - } - if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b { - return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a) - } - // Check the range. - if !isExtensionField(pb, extension.Field) { - return errors.New("proto: bad extension number; not in declared ranges") - } - return nil -} - -// extPropKey is sufficient to uniquely identify an extension. -type extPropKey struct { - base reflect.Type - field int32 -} - -var extProp = struct { - sync.RWMutex - m map[extPropKey]*Properties -}{ - m: make(map[extPropKey]*Properties), -} - -func extensionProperties(ed *ExtensionDesc) *Properties { - key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} - - extProp.RLock() - if prop, ok := extProp.m[key]; ok { - extProp.RUnlock() - return prop - } - extProp.RUnlock() - - extProp.Lock() - defer extProp.Unlock() - // Check again. - if prop, ok := extProp.m[key]; ok { - return prop - } - - prop := new(Properties) - prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) - extProp.m[key] = prop - return prop -} - -// HasExtension returns whether the given extension is present in pb. -func HasExtension(pb Message, extension *ExtensionDesc) bool { - // TODO: Check types, field numbers, etc.? - epb, err := extendable(pb) - if err != nil { - return false - } - extmap, mu := epb.extensionsRead() - if extmap == nil { - return false + }) } - mu.Lock() - _, ok := extmap[extension.Field] - mu.Unlock() - return ok + clearUnknown(mr, fieldNum(xt.Field)) } -// ClearExtension removes the given extension from pb. -func ClearExtension(pb Message, extension *ExtensionDesc) { - epb, err := extendable(pb) - if err != nil { +// ClearAllExtensions clears all extensions from m. +// This includes populated fields and unknown fields in the extension range. +func ClearAllExtensions(m Message) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() { return } - // TODO: Check types, field numbers, etc.? - extmap := epb.extensionsWrite() - delete(extmap, extension.Field) + + mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool { + if fd.IsExtension() { + mr.Clear(fd) + } + return true + }) + clearUnknown(mr, mr.Descriptor().ExtensionRanges()) } -// GetExtension retrieves a proto2 extended field from pb. 
+// GetExtension retrieves a proto2 extended field from m. // // If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil), // then GetExtension parses the encoded field and returns a Go value of the specified type. // If the field is not present, then the default value is returned (if one is specified), // otherwise ErrMissingExtension is reported. // -// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil), -// then GetExtension returns the raw encoded bytes of the field extension. -func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { - epb, err := extendable(pb) - if err != nil { - return nil, err - } - - if extension.ExtendedType != nil { - // can only check type if this is a complete descriptor - if err := checkExtensionTypes(epb, extension); err != nil { - return nil, err +// If the descriptor is type incomplete (i.e., ExtensionDesc.ExtensionType is nil), +// then GetExtension returns the raw encoded bytes for the extension field. +func GetExtension(m Message, xt *ExtensionDesc) (interface{}, error) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 { + return nil, errNotExtendable + } + + // Retrieve the unknown fields for this extension field. + var bo protoreflect.RawFields + for bi := mr.GetUnknown(); len(bi) > 0; { + num, _, n := protowire.ConsumeField(bi) + if int32(num) == xt.Field { + bo = append(bo, bi[:n]...) } + bi = bi[n:] } - emap, mu := epb.extensionsRead() - if emap == nil { - return defaultExtensionValue(extension) - } - mu.Lock() - defer mu.Unlock() - e, ok := emap[extension.Field] - if !ok { - // defaultExtensionValue returns the default value or - // ErrMissingExtension if there is no default. - return defaultExtensionValue(extension) - } - - if e.value != nil { - // Already decoded. Check the descriptor, though. - if e.desc != extension { - // This shouldn't happen. If it does, it means that - // GetExtension was called twice with two different - // descriptors with the same field number. - return nil, errors.New("proto: descriptor conflict") - } - return extensionAsLegacyType(e.value), nil + // For type incomplete descriptors, only retrieve the unknown fields. + if xt.ExtensionType == nil { + return []byte(bo), nil } - if extension.ExtensionType == nil { - // incomplete descriptor - return e.enc, nil + // If the extension field only exists as unknown fields, unmarshal it. + // This is rarely done since proto.Unmarshal eagerly unmarshals extensions. + xtd := xt.TypeDescriptor() + if !isValidExtension(mr.Descriptor(), xtd) { + return nil, fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m) } - - v, err := decodeExtension(e.enc, extension) - if err != nil { - return nil, err + if !mr.Has(xtd) && len(bo) > 0 { + m2 := mr.New() + if err := (proto.UnmarshalOptions{ + Resolver: extensionResolver{xt}, + }.Unmarshal(bo, m2.Interface())); err != nil { + return nil, err + } + if m2.Has(xtd) { + mr.Set(xtd, m2.Get(xtd)) + clearUnknown(mr, fieldNum(xt.Field)) + } } - // Remember the decoded version and drop the encoded version. - // That way it is safe to mutate what we return. - e.value = extensionAsStorageType(v) - e.desc = extension - e.enc = nil - emap[extension.Field] = e - return extensionAsLegacyType(e.value), nil -} - -// defaultExtensionValue returns the default value for extension. -// If no default for an extension is defined ErrMissingExtension is returned. 
-func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { - if extension.ExtensionType == nil { - // incomplete descriptor, so no default + // Check whether the message has the extension field set or a default. + var pv protoreflect.Value + switch { + case mr.Has(xtd): + pv = mr.Get(xtd) + case xtd.HasDefault(): + pv = xtd.Default() + default: return nil, ErrMissingExtension } - t := reflect.TypeOf(extension.ExtensionType) - props := extensionProperties(extension) - - sf, _, err := fieldDefault(t, props) - if err != nil { - return nil, err - } - - if sf == nil || sf.value == nil { - // There is no default value. - return nil, ErrMissingExtension + v := xt.InterfaceOf(pv) + rv := reflect.ValueOf(v) + if isScalarKind(rv.Kind()) { + rv2 := reflect.New(rv.Type()) + rv2.Elem().Set(rv) + v = rv2.Interface() } + return v, nil +} - if t.Kind() != reflect.Ptr { - // We do not need to return a Ptr, we can directly return sf.value. - return sf.value, nil - } +// extensionResolver is a custom extension resolver that stores a single +// extension type that takes precedence over the global registry. +type extensionResolver struct{ xt protoreflect.ExtensionType } - // We need to return an interface{} that is a pointer to sf.value. - value := reflect.New(t).Elem() - value.Set(reflect.New(value.Type().Elem())) - if sf.kind == reflect.Int32 { - // We may have an int32 or an enum, but the underlying data is int32. - // Since we can't set an int32 into a non int32 reflect.value directly - // set it as a int32. - value.Elem().SetInt(int64(sf.value.(int32))) - } else { - value.Elem().Set(reflect.ValueOf(sf.value)) +func (r extensionResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) { + if xtd := r.xt.TypeDescriptor(); xtd.FullName() == field { + return r.xt, nil } - return value.Interface(), nil + return protoregistry.GlobalTypes.FindExtensionByName(field) } -// decodeExtension decodes an extension encoded in b. -func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { - t := reflect.TypeOf(extension.ExtensionType) - unmarshal := typeUnmarshaler(t, extension.Tag) - - // t is a pointer to a struct, pointer to basic type or a slice. - // Allocate space to store the pointer/slice. - value := reflect.New(t).Elem() - - var err error - for { - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - wire := int(x) & 7 - - b, err = unmarshal(b, valToPointer(value.Addr()), wire) - if err != nil { - return nil, err - } - - if len(b) == 0 { - break - } +func (r extensionResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { + if xtd := r.xt.TypeDescriptor(); xtd.ContainingMessage().FullName() == message && xtd.Number() == field { + return r.xt, nil } - return value.Interface(), nil + return protoregistry.GlobalTypes.FindExtensionByNumber(message, field) } -// GetExtensions returns a slice of the extensions present in pb that are also listed in es. -// The returned slice has the same length as es; missing extensions will appear as nil elements. -func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { - epb, err := extendable(pb) - if err != nil { - return nil, err +// GetExtensions returns a list of the extensions values present in m, +// corresponding with the provided list of extension descriptors, xts. +// If an extension is missing in m, the corresponding value is nil. 
+func GetExtensions(m Message, xts []*ExtensionDesc) ([]interface{}, error) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() { + return nil, errNotExtendable } - extensions = make([]interface{}, len(es)) - for i, e := range es { - extensions[i], err = GetExtension(epb, e) - if err == ErrMissingExtension { - err = nil - } + + vs := make([]interface{}, len(xts)) + for i, xt := range xts { + v, err := GetExtension(m, xt) if err != nil { - return + if err == ErrMissingExtension { + continue + } + return vs, err } + vs[i] = v } - return + return vs, nil } -// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order. -// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing -// just the Field field, which defines the extension's field number. -func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { - epb, err := extendable(pb) - if err != nil { - return nil, err +// SetExtension sets an extension field in m to the provided value. +func SetExtension(m Message, xt *ExtensionDesc, v interface{}) error { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 { + return errNotExtendable } - registeredExtensions := RegisteredExtensions(pb) - emap, mu := epb.extensionsRead() - if emap == nil { - return nil, nil + rv := reflect.ValueOf(v) + if reflect.TypeOf(v) != reflect.TypeOf(xt.ExtensionType) { + return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", v, xt.ExtensionType) } - mu.Lock() - defer mu.Unlock() - extensions := make([]*ExtensionDesc, 0, len(emap)) - for extid, e := range emap { - desc := e.desc - if desc == nil { - desc = registeredExtensions[extid] - if desc == nil { - desc = &ExtensionDesc{Field: extid} - } + if rv.Kind() == reflect.Ptr { + if rv.IsNil() { + return fmt.Errorf("proto: SetExtension called with nil value of type %T", v) + } + if isScalarKind(rv.Elem().Kind()) { + v = rv.Elem().Interface() } - - extensions = append(extensions, desc) } - return extensions, nil -} -// SetExtension sets the specified extension of pb to the specified value. -func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { - epb, err := extendable(pb) - if err != nil { - return err - } - if err := checkExtensionTypes(epb, extension); err != nil { - return err - } - typ := reflect.TypeOf(extension.ExtensionType) - if typ != reflect.TypeOf(value) { - return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType) + xtd := xt.TypeDescriptor() + if !isValidExtension(mr.Descriptor(), xtd) { + return fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m) } - // nil extension values need to be caught early, because the - // encoder can't distinguish an ErrNil due to a nil extension - // from an ErrNil due to a missing field. Extensions are - // always optional, so the encoder would just swallow the error - // and drop all the extensions from the encoded message. - if reflect.ValueOf(value).IsNil() { - return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) - } - - extmap := epb.extensionsWrite() - extmap[extension.Field] = Extension{desc: extension, value: extensionAsStorageType(value)} + mr.Set(xtd, xt.ValueOf(v)) + clearUnknown(mr, fieldNum(xt.Field)) return nil } -// ClearAllExtensions clears all extensions from pb. 
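// A hedged usage sketch of the extension accessors defined in this file.
// pb.MyMessage and pb.E_MyExtension stand in for protoc-gen-go generated code
// and are hypothetical; only the proto.* calls come from this package.
package main

import (
	"log"

	"github.com/golang/protobuf/proto"

	pb "example.com/project/gen" // hypothetical generated package
)

func main() {
	m := &pb.MyMessage{}

	// SetExtension writes the value through protoreflect and drops any
	// matching unknown-field bytes, as implemented above.
	if err := proto.SetExtension(m, pb.E_MyExtension, proto.String("hello")); err != nil {
		log.Fatal(err)
	}

	// HasExtension reports presence as either a populated field or raw
	// unknown bytes in the extension range.
	if proto.HasExtension(m, pb.E_MyExtension) {
		v, err := proto.GetExtension(m, pb.E_MyExtension)
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("extension value: %v", *(v.(*string)))
	}
}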
-func ClearAllExtensions(pb Message) { - epb, err := extendable(pb) - if err != nil { +// SetRawExtension inserts b into the unknown fields of m. +// +// Deprecated: Use Message.ProtoReflect.SetUnknown instead. +func SetRawExtension(m Message, fnum int32, b []byte) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() { return } - m := epb.extensionsWrite() - for k := range m { - delete(m, k) + + // Verify that the raw field is valid. + for b0 := b; len(b0) > 0; { + num, _, n := protowire.ConsumeField(b0) + if int32(num) != fnum { + panic(fmt.Sprintf("mismatching field number: got %d, want %d", num, fnum)) + } + b0 = b0[n:] } -} -// A global registry of extensions. -// The generated code will register the generated descriptors by calling RegisterExtension. + ClearExtension(m, &ExtensionDesc{Field: fnum}) + mr.SetUnknown(append(mr.GetUnknown(), b...)) +} -var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) +// ExtensionDescs returns a list of extension descriptors found in m, +// containing descriptors for both populated extension fields in m and +// also unknown fields of m that are in the extension range. +// For the later case, an type incomplete descriptor is provided where only +// the ExtensionDesc.Field field is populated. +// The order of the extension descriptors is undefined. +func ExtensionDescs(m Message) ([]*ExtensionDesc, error) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 { + return nil, errNotExtendable + } -// RegisterExtension is called from the generated code. -func RegisterExtension(desc *ExtensionDesc) { - st := reflect.TypeOf(desc.ExtendedType).Elem() - m := extensionMaps[st] - if m == nil { - m = make(map[int32]*ExtensionDesc) - extensionMaps[st] = m + // Collect a set of known extension descriptors. + extDescs := make(map[protoreflect.FieldNumber]*ExtensionDesc) + mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + if fd.IsExtension() { + xt := fd.(protoreflect.ExtensionTypeDescriptor) + if xd, ok := xt.Type().(*ExtensionDesc); ok { + extDescs[fd.Number()] = xd + } + } + return true + }) + + // Collect a set of unknown extension descriptors. + extRanges := mr.Descriptor().ExtensionRanges() + for b := mr.GetUnknown(); len(b) > 0; { + num, _, n := protowire.ConsumeField(b) + if extRanges.Has(num) && extDescs[num] == nil { + extDescs[num] = nil + } + b = b[n:] } - if _, ok := m[desc.Field]; ok { - panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) + + // Transpose the set of descriptors into a list. + var xts []*ExtensionDesc + for num, xt := range extDescs { + if xt == nil { + xt = &ExtensionDesc{Field: int32(num)} + } + xts = append(xts, xt) } - m[desc.Field] = desc + return xts, nil } -// RegisteredExtensions returns a map of the registered extensions of a -// protocol buffer struct, indexed by the extension number. -// The argument pb should be a nil pointer to the struct type. -func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { - return extensionMaps[reflect.TypeOf(pb).Elem()] +// isValidExtension reports whether xtd is a valid extension descriptor for md. +func isValidExtension(md protoreflect.MessageDescriptor, xtd protoreflect.ExtensionTypeDescriptor) bool { + return xtd.ContainingMessage() == md && md.ExtensionRanges().Has(xtd.Number()) } -// extensionAsLegacyType converts an value in the storage type as the API type. -// See Extension.value. 
-func extensionAsLegacyType(v interface{}) interface{} { - switch rv := reflect.ValueOf(v); rv.Kind() { +// isScalarKind reports whether k is a protobuf scalar kind (except bytes). +// This function exists for historical reasons since the representation of +// scalars differs between v1 and v2, where v1 uses *T and v2 uses T. +func isScalarKind(k reflect.Kind) bool { + switch k { case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: - // Represent primitive types as a pointer to the value. - rv2 := reflect.New(rv.Type()) - rv2.Elem().Set(rv) - v = rv2.Interface() - case reflect.Ptr: - // Represent slice types as the value itself. - switch rv.Type().Elem().Kind() { - case reflect.Slice: - if rv.IsNil() { - v = reflect.Zero(rv.Type().Elem()).Interface() - } else { - v = rv.Elem().Interface() - } - } + return true + default: + return false } - return v } -// extensionAsStorageType converts an value in the API type as the storage type. -// See Extension.value. -func extensionAsStorageType(v interface{}) interface{} { - switch rv := reflect.ValueOf(v); rv.Kind() { - case reflect.Ptr: - // Represent slice types as the value itself. - switch rv.Type().Elem().Kind() { - case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: - if rv.IsNil() { - v = reflect.Zero(rv.Type().Elem()).Interface() - } else { - v = rv.Elem().Interface() - } - } - case reflect.Slice: - // Represent slice types as a pointer to the value. - if rv.Type().Elem().Kind() != reflect.Uint8 { - rv2 := reflect.New(rv.Type()) - rv2.Elem().Set(rv) - v = rv2.Interface() +// clearUnknown removes unknown fields from m where remover.Has reports true. +func clearUnknown(m protoreflect.Message, remover interface { + Has(protoreflect.FieldNumber) bool +}) { + var bo protoreflect.RawFields + for bi := m.GetUnknown(); len(bi) > 0; { + num, _, n := protowire.ConsumeField(bi) + if !remover.Has(num) { + bo = append(bo, bi[:n]...) } + bi = bi[n:] } - return v + if bi := m.GetUnknown(); len(bi) != len(bo) { + m.SetUnknown(bo) + } +} + +type fieldNum protoreflect.FieldNumber + +func (n1 fieldNum) Has(n2 protoreflect.FieldNumber) bool { + return protoreflect.FieldNumber(n1) == n2 } diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go deleted file mode 100644 index fdd328bb7..000000000 --- a/vendor/github.com/golang/protobuf/proto/lib.go +++ /dev/null @@ -1,965 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -/* -Package proto converts data structures to and from the wire format of -protocol buffers. It works in concert with the Go source code generated -for .proto files by the protocol compiler. - -A summary of the properties of the protocol buffer interface -for a protocol buffer variable v: - - - Names are turned from camel_case to CamelCase for export. - - There are no methods on v to set fields; just treat - them as structure fields. - - There are getters that return a field's value if set, - and return the field's default value if unset. - The getters work even if the receiver is a nil message. - - The zero value for a struct is its correct initialization state. - All desired fields must be set before marshaling. - - A Reset() method will restore a protobuf struct to its zero state. - - Non-repeated fields are pointers to the values; nil means unset. - That is, optional or required field int32 f becomes F *int32. - - Repeated fields are slices. - - Helper functions are available to aid the setting of fields. - msg.Foo = proto.String("hello") // set field - - Constants are defined to hold the default values of all fields that - have them. They have the form Default_StructName_FieldName. - Because the getter methods handle defaulted values, - direct use of these constants should be rare. - - Enums are given type names and maps from names to values. - Enum values are prefixed by the enclosing message's name, or by the - enum's type name if it is a top-level enum. Enum types have a String - method, and a Enum method to assist in message construction. - - Nested messages, groups and enums have type names prefixed with the name of - the surrounding message type. - - Extensions are given descriptor names that start with E_, - followed by an underscore-delimited list of the nested messages - that contain it (if any) followed by the CamelCased name of the - extension field itself. HasExtension, ClearExtension, GetExtension - and SetExtension are functions for manipulating extensions. - - Oneof field sets are given a single field in their message, - with distinguished wrapper types for each possible field value. - - Marshal and Unmarshal are functions to encode and decode the wire format. - -When the .proto file specifies `syntax="proto3"`, there are some differences: - - - Non-repeated fields of non-message type are values instead of pointers. - - Enum types do not get an Enum method. - -The simplest way to describe this is to see an example. 
-Given file test.proto, containing - - package example; - - enum FOO { X = 17; } - - message Test { - required string label = 1; - optional int32 type = 2 [default=77]; - repeated int64 reps = 3; - optional group OptionalGroup = 4 { - required string RequiredField = 5; - } - oneof union { - int32 number = 6; - string name = 7; - } - } - -The resulting file, test.pb.go, is: - - package example - - import proto "github.com/golang/protobuf/proto" - import math "math" - - type FOO int32 - const ( - FOO_X FOO = 17 - ) - var FOO_name = map[int32]string{ - 17: "X", - } - var FOO_value = map[string]int32{ - "X": 17, - } - - func (x FOO) Enum() *FOO { - p := new(FOO) - *p = x - return p - } - func (x FOO) String() string { - return proto.EnumName(FOO_name, int32(x)) - } - func (x *FOO) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FOO_value, data) - if err != nil { - return err - } - *x = FOO(value) - return nil - } - - type Test struct { - Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` - Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` - Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` - Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` - // Types that are valid to be assigned to Union: - // *Test_Number - // *Test_Name - Union isTest_Union `protobuf_oneof:"union"` - XXX_unrecognized []byte `json:"-"` - } - func (m *Test) Reset() { *m = Test{} } - func (m *Test) String() string { return proto.CompactTextString(m) } - func (*Test) ProtoMessage() {} - - type isTest_Union interface { - isTest_Union() - } - - type Test_Number struct { - Number int32 `protobuf:"varint,6,opt,name=number"` - } - type Test_Name struct { - Name string `protobuf:"bytes,7,opt,name=name"` - } - - func (*Test_Number) isTest_Union() {} - func (*Test_Name) isTest_Union() {} - - func (m *Test) GetUnion() isTest_Union { - if m != nil { - return m.Union - } - return nil - } - const Default_Test_Type int32 = 77 - - func (m *Test) GetLabel() string { - if m != nil && m.Label != nil { - return *m.Label - } - return "" - } - - func (m *Test) GetType() int32 { - if m != nil && m.Type != nil { - return *m.Type - } - return Default_Test_Type - } - - func (m *Test) GetOptionalgroup() *Test_OptionalGroup { - if m != nil { - return m.Optionalgroup - } - return nil - } - - type Test_OptionalGroup struct { - RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` - } - func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } - func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } - - func (m *Test_OptionalGroup) GetRequiredField() string { - if m != nil && m.RequiredField != nil { - return *m.RequiredField - } - return "" - } - - func (m *Test) GetNumber() int32 { - if x, ok := m.GetUnion().(*Test_Number); ok { - return x.Number - } - return 0 - } - - func (m *Test) GetName() string { - if x, ok := m.GetUnion().(*Test_Name); ok { - return x.Name - } - return "" - } - - func init() { - proto.RegisterEnum("example.FOO", FOO_name, FOO_value) - } - -To create and play with a Test object: - - package main - - import ( - "log" - - "github.com/golang/protobuf/proto" - pb "./example.pb" - ) - - func main() { - test := &pb.Test{ - Label: proto.String("hello"), - Type: proto.Int32(17), - Reps: []int64{1, 2, 3}, - Optionalgroup: &pb.Test_OptionalGroup{ - RequiredField: proto.String("good bye"), - }, - Union: &pb.Test_Name{"fred"}, - 
} - data, err := proto.Marshal(test) - if err != nil { - log.Fatal("marshaling error: ", err) - } - newTest := &pb.Test{} - err = proto.Unmarshal(data, newTest) - if err != nil { - log.Fatal("unmarshaling error: ", err) - } - // Now test and newTest contain the same data. - if test.GetLabel() != newTest.GetLabel() { - log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) - } - // Use a type switch to determine which oneof was set. - switch u := test.Union.(type) { - case *pb.Test_Number: // u.Number contains the number. - case *pb.Test_Name: // u.Name contains the string. - } - // etc. - } -*/ -package proto - -import ( - "encoding/json" - "fmt" - "log" - "reflect" - "sort" - "strconv" - "sync" -) - -// RequiredNotSetError is an error type returned by either Marshal or Unmarshal. -// Marshal reports this when a required field is not initialized. -// Unmarshal reports this when a required field is missing from the wire data. -type RequiredNotSetError struct{ field string } - -func (e *RequiredNotSetError) Error() string { - if e.field == "" { - return fmt.Sprintf("proto: required field not set") - } - return fmt.Sprintf("proto: required field %q not set", e.field) -} -func (e *RequiredNotSetError) RequiredNotSet() bool { - return true -} - -type invalidUTF8Error struct{ field string } - -func (e *invalidUTF8Error) Error() string { - if e.field == "" { - return "proto: invalid UTF-8 detected" - } - return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field) -} -func (e *invalidUTF8Error) InvalidUTF8() bool { - return true -} - -// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8. -// This error should not be exposed to the external API as such errors should -// be recreated with the field information. -var errInvalidUTF8 = &invalidUTF8Error{} - -// isNonFatal reports whether the error is either a RequiredNotSet error -// or a InvalidUTF8 error. -func isNonFatal(err error) bool { - if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() { - return true - } - if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() { - return true - } - return false -} - -type nonFatal struct{ E error } - -// Merge merges err into nf and reports whether it was successful. -// Otherwise it returns false for any fatal non-nil errors. -func (nf *nonFatal) Merge(err error) (ok bool) { - if err == nil { - return true // not an error - } - if !isNonFatal(err) { - return false // fatal error - } - if nf.E == nil { - nf.E = err // store first instance of non-fatal error - } - return true -} - -// Message is implemented by generated protocol buffer messages. -type Message interface { - Reset() - String() string - ProtoMessage() -} - -// A Buffer is a buffer manager for marshaling and unmarshaling -// protocol buffers. It may be reused between invocations to -// reduce memory usage. It is not necessary to use a Buffer; -// the global functions Marshal and Unmarshal create a -// temporary Buffer and are fine for most applications. -type Buffer struct { - buf []byte // encode/decode byte stream - index int // read point - - deterministic bool -} - -// NewBuffer allocates a new Buffer and initializes its internal data to -// the contents of the argument slice. -func NewBuffer(e []byte) *Buffer { - return &Buffer{buf: e} -} - -// Reset resets the Buffer, ready for marshaling a new protocol buffer. 
-func (p *Buffer) Reset() { - p.buf = p.buf[0:0] // for reading/writing - p.index = 0 // for reading -} - -// SetBuf replaces the internal buffer with the slice, -// ready for unmarshaling the contents of the slice. -func (p *Buffer) SetBuf(s []byte) { - p.buf = s - p.index = 0 -} - -// Bytes returns the contents of the Buffer. -func (p *Buffer) Bytes() []byte { return p.buf } - -// SetDeterministic sets whether to use deterministic serialization. -// -// Deterministic serialization guarantees that for a given binary, equal -// messages will always be serialized to the same bytes. This implies: -// -// - Repeated serialization of a message will return the same bytes. -// - Different processes of the same binary (which may be executing on -// different machines) will serialize equal messages to the same bytes. -// -// Note that the deterministic serialization is NOT canonical across -// languages. It is not guaranteed to remain stable over time. It is unstable -// across different builds with schema changes due to unknown fields. -// Users who need canonical serialization (e.g., persistent storage in a -// canonical form, fingerprinting, etc.) should define their own -// canonicalization specification and implement their own serializer rather -// than relying on this API. -// -// If deterministic serialization is requested, map entries will be sorted -// by keys in lexographical order. This is an implementation detail and -// subject to change. -func (p *Buffer) SetDeterministic(deterministic bool) { - p.deterministic = deterministic -} - -/* - * Helper routines for simplifying the creation of optional fields of basic type. - */ - -// Bool is a helper routine that allocates a new bool value -// to store v and returns a pointer to it. -func Bool(v bool) *bool { - return &v -} - -// Int32 is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it. -func Int32(v int32) *int32 { - return &v -} - -// Int is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it, but unlike Int32 -// its argument value is an int. -func Int(v int) *int32 { - p := new(int32) - *p = int32(v) - return p -} - -// Int64 is a helper routine that allocates a new int64 value -// to store v and returns a pointer to it. -func Int64(v int64) *int64 { - return &v -} - -// Float32 is a helper routine that allocates a new float32 value -// to store v and returns a pointer to it. -func Float32(v float32) *float32 { - return &v -} - -// Float64 is a helper routine that allocates a new float64 value -// to store v and returns a pointer to it. -func Float64(v float64) *float64 { - return &v -} - -// Uint32 is a helper routine that allocates a new uint32 value -// to store v and returns a pointer to it. -func Uint32(v uint32) *uint32 { - return &v -} - -// Uint64 is a helper routine that allocates a new uint64 value -// to store v and returns a pointer to it. -func Uint64(v uint64) *uint64 { - return &v -} - -// String is a helper routine that allocates a new string value -// to store v and returns a pointer to it. -func String(v string) *string { - return &v -} - -// EnumName is a helper function to simplify printing protocol buffer enums -// by name. Given an enum map and a value, it returns a useful string. 
-func EnumName(m map[int32]string, v int32) string { - s, ok := m[v] - if ok { - return s - } - return strconv.Itoa(int(v)) -} - -// UnmarshalJSONEnum is a helper function to simplify recovering enum int values -// from their JSON-encoded representation. Given a map from the enum's symbolic -// names to its int values, and a byte buffer containing the JSON-encoded -// value, it returns an int32 that can be cast to the enum type by the caller. -// -// The function can deal with both JSON representations, numeric and symbolic. -func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { - if data[0] == '"' { - // New style: enums are strings. - var repr string - if err := json.Unmarshal(data, &repr); err != nil { - return -1, err - } - val, ok := m[repr] - if !ok { - return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) - } - return val, nil - } - // Old style: enums are ints. - var val int32 - if err := json.Unmarshal(data, &val); err != nil { - return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) - } - return val, nil -} - -// DebugPrint dumps the encoded data in b in a debugging format with a header -// including the string s. Used in testing but made available for general debugging. -func (p *Buffer) DebugPrint(s string, b []byte) { - var u uint64 - - obuf := p.buf - index := p.index - p.buf = b - p.index = 0 - depth := 0 - - fmt.Printf("\n--- %s ---\n", s) - -out: - for { - for i := 0; i < depth; i++ { - fmt.Print(" ") - } - - index := p.index - if index == len(p.buf) { - break - } - - op, err := p.DecodeVarint() - if err != nil { - fmt.Printf("%3d: fetching op err %v\n", index, err) - break out - } - tag := op >> 3 - wire := op & 7 - - switch wire { - default: - fmt.Printf("%3d: t=%3d unknown wire=%d\n", - index, tag, wire) - break out - - case WireBytes: - var r []byte - - r, err = p.DecodeRawBytes(false) - if err != nil { - break out - } - fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) - if len(r) <= 6 { - for i := 0; i < len(r); i++ { - fmt.Printf(" %.2x", r[i]) - } - } else { - for i := 0; i < 3; i++ { - fmt.Printf(" %.2x", r[i]) - } - fmt.Printf(" ..") - for i := len(r) - 3; i < len(r); i++ { - fmt.Printf(" %.2x", r[i]) - } - } - fmt.Printf("\n") - - case WireFixed32: - u, err = p.DecodeFixed32() - if err != nil { - fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) - - case WireFixed64: - u, err = p.DecodeFixed64() - if err != nil { - fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) - - case WireVarint: - u, err = p.DecodeVarint() - if err != nil { - fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) - - case WireStartGroup: - fmt.Printf("%3d: t=%3d start\n", index, tag) - depth++ - - case WireEndGroup: - depth-- - fmt.Printf("%3d: t=%3d end\n", index, tag) - } - } - - if depth != 0 { - fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) - } - fmt.Printf("\n") - - p.buf = obuf - p.index = index -} - -// SetDefaults sets unset protocol buffer fields to their default values. -// It only modifies fields that are both unset and have defined defaults. -// It recursively sets default values in any non-nil sub-messages. -func SetDefaults(pb Message) { - setDefaults(reflect.ValueOf(pb), true, false) -} - -// v is a pointer to a struct. 
-func setDefaults(v reflect.Value, recur, zeros bool) { - v = v.Elem() - - defaultMu.RLock() - dm, ok := defaults[v.Type()] - defaultMu.RUnlock() - if !ok { - dm = buildDefaultMessage(v.Type()) - defaultMu.Lock() - defaults[v.Type()] = dm - defaultMu.Unlock() - } - - for _, sf := range dm.scalars { - f := v.Field(sf.index) - if !f.IsNil() { - // field already set - continue - } - dv := sf.value - if dv == nil && !zeros { - // no explicit default, and don't want to set zeros - continue - } - fptr := f.Addr().Interface() // **T - // TODO: Consider batching the allocations we do here. - switch sf.kind { - case reflect.Bool: - b := new(bool) - if dv != nil { - *b = dv.(bool) - } - *(fptr.(**bool)) = b - case reflect.Float32: - f := new(float32) - if dv != nil { - *f = dv.(float32) - } - *(fptr.(**float32)) = f - case reflect.Float64: - f := new(float64) - if dv != nil { - *f = dv.(float64) - } - *(fptr.(**float64)) = f - case reflect.Int32: - // might be an enum - if ft := f.Type(); ft != int32PtrType { - // enum - f.Set(reflect.New(ft.Elem())) - if dv != nil { - f.Elem().SetInt(int64(dv.(int32))) - } - } else { - // int32 field - i := new(int32) - if dv != nil { - *i = dv.(int32) - } - *(fptr.(**int32)) = i - } - case reflect.Int64: - i := new(int64) - if dv != nil { - *i = dv.(int64) - } - *(fptr.(**int64)) = i - case reflect.String: - s := new(string) - if dv != nil { - *s = dv.(string) - } - *(fptr.(**string)) = s - case reflect.Uint8: - // exceptional case: []byte - var b []byte - if dv != nil { - db := dv.([]byte) - b = make([]byte, len(db)) - copy(b, db) - } else { - b = []byte{} - } - *(fptr.(*[]byte)) = b - case reflect.Uint32: - u := new(uint32) - if dv != nil { - *u = dv.(uint32) - } - *(fptr.(**uint32)) = u - case reflect.Uint64: - u := new(uint64) - if dv != nil { - *u = dv.(uint64) - } - *(fptr.(**uint64)) = u - default: - log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) - } - } - - for _, ni := range dm.nested { - f := v.Field(ni) - // f is *T or []*T or map[T]*T - switch f.Kind() { - case reflect.Ptr: - if f.IsNil() { - continue - } - setDefaults(f, recur, zeros) - - case reflect.Slice: - for i := 0; i < f.Len(); i++ { - e := f.Index(i) - if e.IsNil() { - continue - } - setDefaults(e, recur, zeros) - } - - case reflect.Map: - for _, k := range f.MapKeys() { - e := f.MapIndex(k) - if e.IsNil() { - continue - } - setDefaults(e, recur, zeros) - } - } - } -} - -var ( - // defaults maps a protocol buffer struct type to a slice of the fields, - // with its scalar fields set to their proto-declared non-zero default values. - defaultMu sync.RWMutex - defaults = make(map[reflect.Type]defaultMessage) - - int32PtrType = reflect.TypeOf((*int32)(nil)) -) - -// defaultMessage represents information about the default values of a message. -type defaultMessage struct { - scalars []scalarField - nested []int // struct field index of nested messages -} - -type scalarField struct { - index int // struct field index - kind reflect.Kind // element type (the T in *T or []T) - value interface{} // the proto-declared default value, or nil -} - -// t is a struct type. 
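As a brief illustration of the exported entry point whose internals are shown above, a sketch of SetDefaults; examplepb.Config is again a hypothetical proto2 message, assumed here to declare a field with an explicit default:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"

	examplepb "example.com/demo/examplepb" // hypothetical generated proto2 package
)

func main() {
	// Assume Config declares: optional int32 replicas = 2 [default = 3];
	cfg := &examplepb.Config{}

	// SetDefaults fills unset scalar fields with their proto-declared
	// defaults and recurses into non-nil sub-messages.
	proto.SetDefaults(cfg)
	fmt.Println(cfg.GetReplicas()) // 3
}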
-func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { - sprop := GetProperties(t) - for _, prop := range sprop.Prop { - fi, ok := sprop.decoderTags.get(prop.Tag) - if !ok { - // XXX_unrecognized - continue - } - ft := t.Field(fi).Type - - sf, nested, err := fieldDefault(ft, prop) - switch { - case err != nil: - log.Print(err) - case nested: - dm.nested = append(dm.nested, fi) - case sf != nil: - sf.index = fi - dm.scalars = append(dm.scalars, *sf) - } - } - - return dm -} - -// fieldDefault returns the scalarField for field type ft. -// sf will be nil if the field can not have a default. -// nestedMessage will be true if this is a nested message. -// Note that sf.index is not set on return. -func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { - var canHaveDefault bool - switch ft.Kind() { - case reflect.Ptr: - if ft.Elem().Kind() == reflect.Struct { - nestedMessage = true - } else { - canHaveDefault = true // proto2 scalar field - } - - case reflect.Slice: - switch ft.Elem().Kind() { - case reflect.Ptr: - nestedMessage = true // repeated message - case reflect.Uint8: - canHaveDefault = true // bytes field - } - - case reflect.Map: - if ft.Elem().Kind() == reflect.Ptr { - nestedMessage = true // map with message values - } - } - - if !canHaveDefault { - if nestedMessage { - return nil, true, nil - } - return nil, false, nil - } - - // We now know that ft is a pointer or slice. - sf = &scalarField{kind: ft.Elem().Kind()} - - // scalar fields without defaults - if !prop.HasDefault { - return sf, false, nil - } - - // a scalar field: either *T or []byte - switch ft.Elem().Kind() { - case reflect.Bool: - x, err := strconv.ParseBool(prop.Default) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) - } - sf.value = x - case reflect.Float32: - x, err := strconv.ParseFloat(prop.Default, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) - } - sf.value = float32(x) - case reflect.Float64: - x, err := strconv.ParseFloat(prop.Default, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) - } - sf.value = x - case reflect.Int32: - x, err := strconv.ParseInt(prop.Default, 10, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) - } - sf.value = int32(x) - case reflect.Int64: - x, err := strconv.ParseInt(prop.Default, 10, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) - } - sf.value = x - case reflect.String: - sf.value = prop.Default - case reflect.Uint8: - // []byte (not *uint8) - sf.value = []byte(prop.Default) - case reflect.Uint32: - x, err := strconv.ParseUint(prop.Default, 10, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) - } - sf.value = uint32(x) - case reflect.Uint64: - x, err := strconv.ParseUint(prop.Default, 10, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) - } - sf.value = x - default: - return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) - } - - return sf, false, nil -} - -// mapKeys returns a sort.Interface to be used for sorting the map keys. -// Map fields may have key types of non-float scalars, strings and enums. 
-func mapKeys(vs []reflect.Value) sort.Interface { - s := mapKeySorter{vs: vs} - - // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps. - if len(vs) == 0 { - return s - } - switch vs[0].Kind() { - case reflect.Int32, reflect.Int64: - s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } - case reflect.Uint32, reflect.Uint64: - s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } - case reflect.Bool: - s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true - case reflect.String: - s.less = func(a, b reflect.Value) bool { return a.String() < b.String() } - default: - panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind())) - } - - return s -} - -type mapKeySorter struct { - vs []reflect.Value - less func(a, b reflect.Value) bool -} - -func (s mapKeySorter) Len() int { return len(s.vs) } -func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } -func (s mapKeySorter) Less(i, j int) bool { - return s.less(s.vs[i], s.vs[j]) -} - -// isProto3Zero reports whether v is a zero proto3 value. -func isProto3Zero(v reflect.Value) bool { - switch v.Kind() { - case reflect.Bool: - return !v.Bool() - case reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint32, reflect.Uint64: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.String: - return v.String() == "" - } - return false -} - -const ( - // ProtoPackageIsVersion3 is referenced from generated protocol buffer files - // to assert that that code is compatible with this version of the proto package. - ProtoPackageIsVersion3 = true - - // ProtoPackageIsVersion2 is referenced from generated protocol buffer files - // to assert that that code is compatible with this version of the proto package. - ProtoPackageIsVersion2 = true - - // ProtoPackageIsVersion1 is referenced from generated protocol buffer files - // to assert that that code is compatible with this version of the proto package. - ProtoPackageIsVersion1 = true -) - -// InternalMessageInfo is a type used internally by generated .pb.go files. -// This type is not intended to be used by non-generated code. -// This type is not subject to any compatibility guarantee. -type InternalMessageInfo struct { - marshal *marshalInfo - unmarshal *unmarshalInfo - merge *mergeInfo - discard *discardInfo -} diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go deleted file mode 100644 index f48a75676..000000000 --- a/vendor/github.com/golang/protobuf/proto/message_set.go +++ /dev/null @@ -1,181 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Support for message sets. - */ - -import ( - "errors" -) - -// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. -// A message type ID is required for storing a protocol buffer in a message set. -var errNoMessageTypeID = errors.New("proto does not have a message type ID") - -// The first two types (_MessageSet_Item and messageSet) -// model what the protocol compiler produces for the following protocol message: -// message MessageSet { -// repeated group Item = 1 { -// required int32 type_id = 2; -// required string message = 3; -// }; -// } -// That is the MessageSet wire format. We can't use a proto to generate these -// because that would introduce a circular dependency between it and this package. - -type _MessageSet_Item struct { - TypeId *int32 `protobuf:"varint,2,req,name=type_id"` - Message []byte `protobuf:"bytes,3,req,name=message"` -} - -type messageSet struct { - Item []*_MessageSet_Item `protobuf:"group,1,rep"` - XXX_unrecognized []byte - // TODO: caching? -} - -// Make sure messageSet is a Message. -var _ Message = (*messageSet)(nil) - -// messageTypeIder is an interface satisfied by a protocol buffer type -// that may be stored in a MessageSet. -type messageTypeIder interface { - MessageTypeId() int32 -} - -func (ms *messageSet) find(pb Message) *_MessageSet_Item { - mti, ok := pb.(messageTypeIder) - if !ok { - return nil - } - id := mti.MessageTypeId() - for _, item := range ms.Item { - if *item.TypeId == id { - return item - } - } - return nil -} - -func (ms *messageSet) Has(pb Message) bool { - return ms.find(pb) != nil -} - -func (ms *messageSet) Unmarshal(pb Message) error { - if item := ms.find(pb); item != nil { - return Unmarshal(item.Message, pb) - } - if _, ok := pb.(messageTypeIder); !ok { - return errNoMessageTypeID - } - return nil // TODO: return error instead? -} - -func (ms *messageSet) Marshal(pb Message) error { - msg, err := Marshal(pb) - if err != nil { - return err - } - if item := ms.find(pb); item != nil { - // reuse existing item - item.Message = msg - return nil - } - - mti, ok := pb.(messageTypeIder) - if !ok { - return errNoMessageTypeID - } - - mtid := mti.MessageTypeId() - ms.Item = append(ms.Item, &_MessageSet_Item{ - TypeId: &mtid, - Message: msg, - }) - return nil -} - -func (ms *messageSet) Reset() { *ms = messageSet{} } -func (ms *messageSet) String() string { return CompactTextString(ms) } -func (*messageSet) ProtoMessage() {} - -// Support for the message_set_wire_format message option. 
- -func skipVarint(buf []byte) []byte { - i := 0 - for ; buf[i]&0x80 != 0; i++ { - } - return buf[i+1:] -} - -// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. -// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option. -func unmarshalMessageSet(buf []byte, exts interface{}) error { - var m map[int32]Extension - switch exts := exts.(type) { - case *XXX_InternalExtensions: - m = exts.extensionsWrite() - case map[int32]Extension: - m = exts - default: - return errors.New("proto: not an extension map") - } - - ms := new(messageSet) - if err := Unmarshal(buf, ms); err != nil { - return err - } - for _, item := range ms.Item { - id := *item.TypeId - msg := item.Message - - // Restore wire type and field number varint, plus length varint. - // Be careful to preserve duplicate items. - b := EncodeVarint(uint64(id)<<3 | WireBytes) - if ext, ok := m[id]; ok { - // Existing data; rip off the tag and length varint - // so we join the new data correctly. - // We can assume that ext.enc is set because we are unmarshaling. - o := ext.enc[len(b):] // skip wire type and field number - _, n := DecodeVarint(o) // calculate length of length varint - o = o[n:] // skip length varint - msg = append(o, msg...) // join old data and new data - } - b = append(b, EncodeVarint(uint64(len(msg)))...) - b = append(b, msg...) - - m[id] = Extension{enc: b} - } - return nil -} diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go deleted file mode 100644 index 94fa9194a..000000000 --- a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go +++ /dev/null @@ -1,360 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build purego appengine js - -// This file contains an implementation of proto field accesses using package reflect. 
-// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can -// be used on App Engine. - -package proto - -import ( - "reflect" - "sync" -) - -const unsafeAllowed = false - -// A field identifies a field in a struct, accessible from a pointer. -// In this implementation, a field is identified by the sequence of field indices -// passed to reflect's FieldByIndex. -type field []int - -// toField returns a field equivalent to the given reflect field. -func toField(f *reflect.StructField) field { - return f.Index -} - -// invalidField is an invalid field identifier. -var invalidField = field(nil) - -// zeroField is a noop when calling pointer.offset. -var zeroField = field([]int{}) - -// IsValid reports whether the field identifier is valid. -func (f field) IsValid() bool { return f != nil } - -// The pointer type is for the table-driven decoder. -// The implementation here uses a reflect.Value of pointer type to -// create a generic pointer. In pointer_unsafe.go we use unsafe -// instead of reflect to implement the same (but faster) interface. -type pointer struct { - v reflect.Value -} - -// toPointer converts an interface of pointer type to a pointer -// that points to the same target. -func toPointer(i *Message) pointer { - return pointer{v: reflect.ValueOf(*i)} -} - -// toAddrPointer converts an interface to a pointer that points to -// the interface data. -func toAddrPointer(i *interface{}, isptr, deref bool) pointer { - v := reflect.ValueOf(*i) - u := reflect.New(v.Type()) - u.Elem().Set(v) - if deref { - u = u.Elem() - } - return pointer{v: u} -} - -// valToPointer converts v to a pointer. v must be of pointer type. -func valToPointer(v reflect.Value) pointer { - return pointer{v: v} -} - -// offset converts from a pointer to a structure to a pointer to -// one of its fields. -func (p pointer) offset(f field) pointer { - return pointer{v: p.v.Elem().FieldByIndex(f).Addr()} -} - -func (p pointer) isNil() bool { - return p.v.IsNil() -} - -// grow updates the slice s in place to make it one element longer. -// s must be addressable. -// Returns the (addressable) new element. -func grow(s reflect.Value) reflect.Value { - n, m := s.Len(), s.Cap() - if n < m { - s.SetLen(n + 1) - } else { - s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem()))) - } - return s.Index(n) -} - -func (p pointer) toInt64() *int64 { - return p.v.Interface().(*int64) -} -func (p pointer) toInt64Ptr() **int64 { - return p.v.Interface().(**int64) -} -func (p pointer) toInt64Slice() *[]int64 { - return p.v.Interface().(*[]int64) -} - -var int32ptr = reflect.TypeOf((*int32)(nil)) - -func (p pointer) toInt32() *int32 { - return p.v.Convert(int32ptr).Interface().(*int32) -} - -// The toInt32Ptr/Slice methods don't work because of enums. -// Instead, we must use set/get methods for the int32ptr/slice case. -/* - func (p pointer) toInt32Ptr() **int32 { - return p.v.Interface().(**int32) -} - func (p pointer) toInt32Slice() *[]int32 { - return p.v.Interface().(*[]int32) -} -*/ -func (p pointer) getInt32Ptr() *int32 { - if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { - // raw int32 type - return p.v.Elem().Interface().(*int32) - } - // an enum - return p.v.Elem().Convert(int32PtrType).Interface().(*int32) -} -func (p pointer) setInt32Ptr(v int32) { - // Allocate value in a *int32. Possibly convert that to a *enum. - // Then assign it to a **int32 or **enum. - // Note: we can convert *int32 to *enum, but we can't convert - // **int32 to **enum! 
- p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem())) -} - -// getInt32Slice copies []int32 from p as a new slice. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) getInt32Slice() []int32 { - if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { - // raw int32 type - return p.v.Elem().Interface().([]int32) - } - // an enum - // Allocate a []int32, then assign []enum's values into it. - // Note: we can't convert []enum to []int32. - slice := p.v.Elem() - s := make([]int32, slice.Len()) - for i := 0; i < slice.Len(); i++ { - s[i] = int32(slice.Index(i).Int()) - } - return s -} - -// setInt32Slice copies []int32 into p as a new slice. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) setInt32Slice(v []int32) { - if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { - // raw int32 type - p.v.Elem().Set(reflect.ValueOf(v)) - return - } - // an enum - // Allocate a []enum, then assign []int32's values into it. - // Note: we can't convert []enum to []int32. - slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v)) - for i, x := range v { - slice.Index(i).SetInt(int64(x)) - } - p.v.Elem().Set(slice) -} -func (p pointer) appendInt32Slice(v int32) { - grow(p.v.Elem()).SetInt(int64(v)) -} - -func (p pointer) toUint64() *uint64 { - return p.v.Interface().(*uint64) -} -func (p pointer) toUint64Ptr() **uint64 { - return p.v.Interface().(**uint64) -} -func (p pointer) toUint64Slice() *[]uint64 { - return p.v.Interface().(*[]uint64) -} -func (p pointer) toUint32() *uint32 { - return p.v.Interface().(*uint32) -} -func (p pointer) toUint32Ptr() **uint32 { - return p.v.Interface().(**uint32) -} -func (p pointer) toUint32Slice() *[]uint32 { - return p.v.Interface().(*[]uint32) -} -func (p pointer) toBool() *bool { - return p.v.Interface().(*bool) -} -func (p pointer) toBoolPtr() **bool { - return p.v.Interface().(**bool) -} -func (p pointer) toBoolSlice() *[]bool { - return p.v.Interface().(*[]bool) -} -func (p pointer) toFloat64() *float64 { - return p.v.Interface().(*float64) -} -func (p pointer) toFloat64Ptr() **float64 { - return p.v.Interface().(**float64) -} -func (p pointer) toFloat64Slice() *[]float64 { - return p.v.Interface().(*[]float64) -} -func (p pointer) toFloat32() *float32 { - return p.v.Interface().(*float32) -} -func (p pointer) toFloat32Ptr() **float32 { - return p.v.Interface().(**float32) -} -func (p pointer) toFloat32Slice() *[]float32 { - return p.v.Interface().(*[]float32) -} -func (p pointer) toString() *string { - return p.v.Interface().(*string) -} -func (p pointer) toStringPtr() **string { - return p.v.Interface().(**string) -} -func (p pointer) toStringSlice() *[]string { - return p.v.Interface().(*[]string) -} -func (p pointer) toBytes() *[]byte { - return p.v.Interface().(*[]byte) -} -func (p pointer) toBytesSlice() *[][]byte { - return p.v.Interface().(*[][]byte) -} -func (p pointer) toExtensions() *XXX_InternalExtensions { - return p.v.Interface().(*XXX_InternalExtensions) -} -func (p pointer) toOldExtensions() *map[int32]Extension { - return p.v.Interface().(*map[int32]Extension) -} -func (p pointer) getPointer() pointer { - return pointer{v: p.v.Elem()} -} -func (p pointer) setPointer(q pointer) { - p.v.Elem().Set(q.v) -} -func (p pointer) appendPointer(q pointer) { - grow(p.v.Elem()).Set(q.v) -} - -// getPointerSlice copies []*T from p as a new []pointer. -// This behavior differs from the implementation in pointer_unsafe.go. 
-func (p pointer) getPointerSlice() []pointer { - if p.v.IsNil() { - return nil - } - n := p.v.Elem().Len() - s := make([]pointer, n) - for i := 0; i < n; i++ { - s[i] = pointer{v: p.v.Elem().Index(i)} - } - return s -} - -// setPointerSlice copies []pointer into p as a new []*T. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) setPointerSlice(v []pointer) { - if v == nil { - p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem()) - return - } - s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v)) - for _, p := range v { - s = reflect.Append(s, p.v) - } - p.v.Elem().Set(s) -} - -// getInterfacePointer returns a pointer that points to the -// interface data of the interface pointed by p. -func (p pointer) getInterfacePointer() pointer { - if p.v.Elem().IsNil() { - return pointer{v: p.v.Elem()} - } - return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct -} - -func (p pointer) asPointerTo(t reflect.Type) reflect.Value { - // TODO: check that p.v.Type().Elem() == t? - return p.v -} - -func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} -func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} -func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} -func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} - -var atomicLock sync.Mutex diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go deleted file mode 100644 index dbfffe071..000000000 --- a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go +++ /dev/null @@ -1,313 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build !purego,!appengine,!js - -// This file contains the implementation of the proto field accesses using package unsafe. - -package proto - -import ( - "reflect" - "sync/atomic" - "unsafe" -) - -const unsafeAllowed = true - -// A field identifies a field in a struct, accessible from a pointer. -// In this implementation, a field is identified by its byte offset from the start of the struct. -type field uintptr - -// toField returns a field equivalent to the given reflect field. -func toField(f *reflect.StructField) field { - return field(f.Offset) -} - -// invalidField is an invalid field identifier. -const invalidField = ^field(0) - -// zeroField is a noop when calling pointer.offset. -const zeroField = field(0) - -// IsValid reports whether the field identifier is valid. -func (f field) IsValid() bool { - return f != invalidField -} - -// The pointer type below is for the new table-driven encoder/decoder. -// The implementation here uses unsafe.Pointer to create a generic pointer. -// In pointer_reflect.go we use reflect instead of unsafe to implement -// the same (but slower) interface. -type pointer struct { - p unsafe.Pointer -} - -// size of pointer -var ptrSize = unsafe.Sizeof(uintptr(0)) - -// toPointer converts an interface of pointer type to a pointer -// that points to the same target. -func toPointer(i *Message) pointer { - // Super-tricky - read pointer out of data word of interface value. - // Saves ~25ns over the equivalent: - // return valToPointer(reflect.ValueOf(*i)) - return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} -} - -// toAddrPointer converts an interface to a pointer that points to -// the interface data. -func toAddrPointer(i *interface{}, isptr, deref bool) (p pointer) { - // Super-tricky - read or get the address of data word of interface value. - if isptr { - // The interface is of pointer type, thus it is a direct interface. - // The data word is the pointer data itself. We take its address. - p = pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)} - } else { - // The interface is not of pointer type. The data word is the pointer - // to the data. - p = pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} - } - if deref { - p.p = *(*unsafe.Pointer)(p.p) - } - return p -} - -// valToPointer converts v to a pointer. v must be of pointer type. -func valToPointer(v reflect.Value) pointer { - return pointer{p: unsafe.Pointer(v.Pointer())} -} - -// offset converts from a pointer to a structure to a pointer to -// one of its fields. -func (p pointer) offset(f field) pointer { - // For safety, we should panic if !f.IsValid, however calling panic causes - // this to no longer be inlineable, which is a serious performance cost. 
- /* - if !f.IsValid() { - panic("invalid field") - } - */ - return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))} -} - -func (p pointer) isNil() bool { - return p.p == nil -} - -func (p pointer) toInt64() *int64 { - return (*int64)(p.p) -} -func (p pointer) toInt64Ptr() **int64 { - return (**int64)(p.p) -} -func (p pointer) toInt64Slice() *[]int64 { - return (*[]int64)(p.p) -} -func (p pointer) toInt32() *int32 { - return (*int32)(p.p) -} - -// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist. -/* - func (p pointer) toInt32Ptr() **int32 { - return (**int32)(p.p) - } - func (p pointer) toInt32Slice() *[]int32 { - return (*[]int32)(p.p) - } -*/ -func (p pointer) getInt32Ptr() *int32 { - return *(**int32)(p.p) -} -func (p pointer) setInt32Ptr(v int32) { - *(**int32)(p.p) = &v -} - -// getInt32Slice loads a []int32 from p. -// The value returned is aliased with the original slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) getInt32Slice() []int32 { - return *(*[]int32)(p.p) -} - -// setInt32Slice stores a []int32 to p. -// The value set is aliased with the input slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) setInt32Slice(v []int32) { - *(*[]int32)(p.p) = v -} - -// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead? -func (p pointer) appendInt32Slice(v int32) { - s := (*[]int32)(p.p) - *s = append(*s, v) -} - -func (p pointer) toUint64() *uint64 { - return (*uint64)(p.p) -} -func (p pointer) toUint64Ptr() **uint64 { - return (**uint64)(p.p) -} -func (p pointer) toUint64Slice() *[]uint64 { - return (*[]uint64)(p.p) -} -func (p pointer) toUint32() *uint32 { - return (*uint32)(p.p) -} -func (p pointer) toUint32Ptr() **uint32 { - return (**uint32)(p.p) -} -func (p pointer) toUint32Slice() *[]uint32 { - return (*[]uint32)(p.p) -} -func (p pointer) toBool() *bool { - return (*bool)(p.p) -} -func (p pointer) toBoolPtr() **bool { - return (**bool)(p.p) -} -func (p pointer) toBoolSlice() *[]bool { - return (*[]bool)(p.p) -} -func (p pointer) toFloat64() *float64 { - return (*float64)(p.p) -} -func (p pointer) toFloat64Ptr() **float64 { - return (**float64)(p.p) -} -func (p pointer) toFloat64Slice() *[]float64 { - return (*[]float64)(p.p) -} -func (p pointer) toFloat32() *float32 { - return (*float32)(p.p) -} -func (p pointer) toFloat32Ptr() **float32 { - return (**float32)(p.p) -} -func (p pointer) toFloat32Slice() *[]float32 { - return (*[]float32)(p.p) -} -func (p pointer) toString() *string { - return (*string)(p.p) -} -func (p pointer) toStringPtr() **string { - return (**string)(p.p) -} -func (p pointer) toStringSlice() *[]string { - return (*[]string)(p.p) -} -func (p pointer) toBytes() *[]byte { - return (*[]byte)(p.p) -} -func (p pointer) toBytesSlice() *[][]byte { - return (*[][]byte)(p.p) -} -func (p pointer) toExtensions() *XXX_InternalExtensions { - return (*XXX_InternalExtensions)(p.p) -} -func (p pointer) toOldExtensions() *map[int32]Extension { - return (*map[int32]Extension)(p.p) -} - -// getPointerSlice loads []*T from p as a []pointer. -// The value returned is aliased with the original slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) getPointerSlice() []pointer { - // Super-tricky - p should point to a []*T where T is a - // message type. We load it as []pointer. - return *(*[]pointer)(p.p) -} - -// setPointerSlice stores []pointer into p as a []*T. 
-// The value set is aliased with the input slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) setPointerSlice(v []pointer) { - // Super-tricky - p should point to a []*T where T is a - // message type. We store it as []pointer. - *(*[]pointer)(p.p) = v -} - -// getPointer loads the pointer at p and returns it. -func (p pointer) getPointer() pointer { - return pointer{p: *(*unsafe.Pointer)(p.p)} -} - -// setPointer stores the pointer q at p. -func (p pointer) setPointer(q pointer) { - *(*unsafe.Pointer)(p.p) = q.p -} - -// append q to the slice pointed to by p. -func (p pointer) appendPointer(q pointer) { - s := (*[]unsafe.Pointer)(p.p) - *s = append(*s, q.p) -} - -// getInterfacePointer returns a pointer that points to the -// interface data of the interface pointed by p. -func (p pointer) getInterfacePointer() pointer { - // Super-tricky - read pointer out of data word of interface value. - return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]} -} - -// asPointerTo returns a reflect.Value that is a pointer to an -// object of type t stored at p. -func (p pointer) asPointerTo(t reflect.Type) reflect.Value { - return reflect.NewAt(t, p.p) -} - -func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { - return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} -func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { - return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} -func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { - return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} -func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { - return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go index a4b8c0cd3..dcdc2202f 100644 --- a/vendor/github.com/golang/protobuf/proto/properties.go +++ b/vendor/github.com/golang/protobuf/proto/properties.go @@ -1,162 +1,104 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. package proto -/* - * Routines for encoding data into the wire format for protocol buffers. - */ - import ( "fmt" - "log" "reflect" - "sort" "strconv" "strings" "sync" -) - -const debug bool = false -// Constants that identify the encoding of a value on the wire. -const ( - WireVarint = 0 - WireFixed64 = 1 - WireBytes = 2 - WireStartGroup = 3 - WireEndGroup = 4 - WireFixed32 = 5 + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoimpl" ) -// tagMap is an optimization over map[int]int for typical protocol buffer -// use-cases. Encoded protocol buffers are often in tag order with small tag -// numbers. -type tagMap struct { - fastTags []int - slowTags map[int]int -} - -// tagMapFastLimit is the upper bound on the tag number that will be stored in -// the tagMap slice rather than its map. -const tagMapFastLimit = 1024 - -func (p *tagMap) get(t int) (int, bool) { - if t > 0 && t < tagMapFastLimit { - if t >= len(p.fastTags) { - return 0, false - } - fi := p.fastTags[t] - return fi, fi >= 0 - } - fi, ok := p.slowTags[t] - return fi, ok -} - -func (p *tagMap) put(t int, fi int) { - if t > 0 && t < tagMapFastLimit { - for len(p.fastTags) < t+1 { - p.fastTags = append(p.fastTags, -1) - } - p.fastTags[t] = fi - return - } - if p.slowTags == nil { - p.slowTags = make(map[int]int) - } - p.slowTags[t] = fi -} - -// StructProperties represents properties for all the fields of a struct. -// decoderTags and decoderOrigNames should only be used by the decoder. +// StructProperties represents protocol buffer type information for a +// generated protobuf message in the open-struct API. +// +// Deprecated: Do not use. type StructProperties struct { - Prop []*Properties // properties for each field - reqCount int // required count - decoderTags tagMap // map from proto tag to struct field number - decoderOrigNames map[string]int // map from original name to struct field number - order []int // list of struct field numbers in tag order + // Prop are the properties for each field. + // + // Fields belonging to a oneof are stored in OneofTypes instead, with a + // single Properties representing the parent oneof held here. + // + // The order of Prop matches the order of fields in the Go struct. + // Struct fields that are not related to protobufs have a "XXX_" prefix + // in the Properties.Name and must be ignored by the user. + Prop []*Properties // OneofTypes contains information about the oneof fields in this message. - // It is keyed by the original name of a field. + // It is keyed by the protobuf field name. 
OneofTypes map[string]*OneofProperties } -// OneofProperties represents information about a specific field in a oneof. -type OneofProperties struct { - Type reflect.Type // pointer to generated struct type for this oneof field - Field int // struct field number of the containing oneof in the message - Prop *Properties -} - -// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. -// See encode.go, (*Buffer).enc_struct. - -func (sp *StructProperties) Len() int { return len(sp.order) } -func (sp *StructProperties) Less(i, j int) bool { - return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag -} -func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } - -// Properties represents the protocol-specific behavior of a single struct field. +// Properties represents the type information for a protobuf message field. +// +// Deprecated: Do not use. type Properties struct { - Name string // name of the field, for error messages - OrigName string // original name before protocol compiler (always set) - JSONName string // name to use for JSON; determined by protoc - Wire string + // Name is a placeholder name with little meaningful semantic value. + // If the name has an "XXX_" prefix, the entire Properties must be ignored. + Name string + // OrigName is the protobuf field name or oneof name. + OrigName string + // JSONName is the JSON name for the protobuf field. + JSONName string + // Enum is a placeholder name for enums. + // For historical reasons, this is neither the Go name for the enum, + // nor the protobuf name for the enum. + Enum string // Deprecated: Do not use. + // Weak contains the full name of the weakly referenced message. + Weak string + // Wire is a string representation of the wire type. + Wire string + // WireType is the protobuf wire type for the field. WireType int - Tag int + // Tag is the protobuf field number. + Tag int + // Required reports whether this is a required field. Required bool + // Optional reports whether this is a optional field. Optional bool + // Repeated reports whether this is a repeated field. Repeated bool - Packed bool // relevant for repeated primitives only - Enum string // set for enum types only - proto3 bool // whether this is known to be a proto3 field - oneof bool // whether this is a oneof field - - Default string // default value - HasDefault bool // whether an explicit default was provided - - stype reflect.Type // set for struct types only - sprop *StructProperties // set for struct types only + // Packed reports whether this is a packed repeated field of scalars. + Packed bool + // Proto3 reports whether this field operates under the proto3 syntax. + Proto3 bool + // Oneof reports whether this field belongs within a oneof. + Oneof bool + + // Default is the default value in string form. + Default string + // HasDefault reports whether the field has a default value. + HasDefault bool + + // MapKeyProp is the properties for the key field for a map field. + MapKeyProp *Properties + // MapValProp is the properties for the value field for a map field. + MapValProp *Properties +} - mtype reflect.Type // set for map types only - MapKeyProp *Properties // set for map types only - MapValProp *Properties // set for map types only +// OneofProperties represents the type information for a protobuf oneof. +// +// Deprecated: Do not use. +type OneofProperties struct { + // Type is a pointer to the generated wrapper type for the field value. 
+ // This is nil for messages that are not in the open-struct API. + Type reflect.Type + // Field is the index into StructProperties.Prop for the containing oneof. + Field int + // Prop is the properties for the field. + Prop *Properties } // String formats the properties in the protobuf struct field tag style. func (p *Properties) String() string { s := p.Wire - s += "," - s += strconv.Itoa(p.Tag) + s += "," + strconv.Itoa(p.Tag) if p.Required { s += ",req" } @@ -170,18 +112,21 @@ func (p *Properties) String() string { s += ",packed" } s += ",name=" + p.OrigName - if p.JSONName != p.OrigName { + if p.JSONName != "" { s += ",json=" + p.JSONName } - if p.proto3 { + if len(p.Enum) > 0 { + s += ",enum=" + p.Enum + } + if len(p.Weak) > 0 { + s += ",weak=" + p.Weak + } + if p.Proto3 { s += ",proto3" } - if p.oneof { + if p.Oneof { s += ",oneof" } - if len(p.Enum) > 0 { - s += ",enum=" + p.Enum - } if p.HasDefault { s += ",def=" + p.Default } @@ -189,356 +134,173 @@ func (p *Properties) String() string { } // Parse populates p by parsing a string in the protobuf struct field tag style. -func (p *Properties) Parse(s string) { - // "bytes,49,opt,name=foo,def=hello!" - fields := strings.Split(s, ",") // breaks def=, but handled below. - if len(fields) < 2 { - log.Printf("proto: tag has too few fields: %q", s) - return - } - - p.Wire = fields[0] - switch p.Wire { - case "varint": - p.WireType = WireVarint - case "fixed32": - p.WireType = WireFixed32 - case "fixed64": - p.WireType = WireFixed64 - case "zigzag32": - p.WireType = WireVarint - case "zigzag64": - p.WireType = WireVarint - case "bytes", "group": - p.WireType = WireBytes - // no numeric converter for non-numeric types - default: - log.Printf("proto: tag has unknown wire type: %q", s) - return - } - - var err error - p.Tag, err = strconv.Atoi(fields[1]) - if err != nil { - return - } - -outer: - for i := 2; i < len(fields); i++ { - f := fields[i] - switch { - case f == "req": - p.Required = true - case f == "opt": +func (p *Properties) Parse(tag string) { + // For example: "bytes,49,opt,name=foo,def=hello!" 
+ for len(tag) > 0 { + i := strings.IndexByte(tag, ',') + if i < 0 { + i = len(tag) + } + switch s := tag[:i]; { + case strings.HasPrefix(s, "name="): + p.OrigName = s[len("name="):] + case strings.HasPrefix(s, "json="): + p.JSONName = s[len("json="):] + case strings.HasPrefix(s, "enum="): + p.Enum = s[len("enum="):] + case strings.HasPrefix(s, "weak="): + p.Weak = s[len("weak="):] + case strings.Trim(s, "0123456789") == "": + n, _ := strconv.ParseUint(s, 10, 32) + p.Tag = int(n) + case s == "opt": p.Optional = true - case f == "rep": + case s == "req": + p.Required = true + case s == "rep": p.Repeated = true - case f == "packed": + case s == "varint" || s == "zigzag32" || s == "zigzag64": + p.Wire = s + p.WireType = WireVarint + case s == "fixed32": + p.Wire = s + p.WireType = WireFixed32 + case s == "fixed64": + p.Wire = s + p.WireType = WireFixed64 + case s == "bytes": + p.Wire = s + p.WireType = WireBytes + case s == "group": + p.Wire = s + p.WireType = WireStartGroup + case s == "packed": p.Packed = true - case strings.HasPrefix(f, "name="): - p.OrigName = f[5:] - case strings.HasPrefix(f, "json="): - p.JSONName = f[5:] - case strings.HasPrefix(f, "enum="): - p.Enum = f[5:] - case f == "proto3": - p.proto3 = true - case f == "oneof": - p.oneof = true - case strings.HasPrefix(f, "def="): + case s == "proto3": + p.Proto3 = true + case s == "oneof": + p.Oneof = true + case strings.HasPrefix(s, "def="): + // The default tag is special in that everything afterwards is the + // default regardless of the presence of commas. p.HasDefault = true - p.Default = f[4:] // rest of string - if i+1 < len(fields) { - // Commas aren't escaped, and def is always last. - p.Default += "," + strings.Join(fields[i+1:], ",") - break outer - } - } - } -} - -var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() - -// setFieldProps initializes the field properties for submessages and maps. -func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { - switch t1 := typ; t1.Kind() { - case reflect.Ptr: - if t1.Elem().Kind() == reflect.Struct { - p.stype = t1.Elem() - } - - case reflect.Slice: - if t2 := t1.Elem(); t2.Kind() == reflect.Ptr && t2.Elem().Kind() == reflect.Struct { - p.stype = t2.Elem() - } - - case reflect.Map: - p.mtype = t1 - p.MapKeyProp = &Properties{} - p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) - p.MapValProp = &Properties{} - vtype := p.mtype.Elem() - if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { - // The value type is not a message (*T) or bytes ([]byte), - // so we need encoders for the pointer to this type. - vtype = reflect.PtrTo(vtype) - } - p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) - } - - if p.stype != nil { - if lockGetProp { - p.sprop = GetProperties(p.stype) - } else { - p.sprop = getPropertiesLocked(p.stype) + p.Default, i = tag[len("def="):], len(tag) } + tag = strings.TrimPrefix(tag[i:], ",") } } -var ( - marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() -) - // Init populates the properties from a protocol buffer struct tag. +// +// Deprecated: Do not use. func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { - p.init(typ, name, tag, f, true) -} - -func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { - // "bytes,49,opt,def=hello!" 
p.Name = name p.OrigName = name if tag == "" { return } p.Parse(tag) - p.setFieldProps(typ, f, lockGetProp) + + if typ != nil && typ.Kind() == reflect.Map { + p.MapKeyProp = new(Properties) + p.MapKeyProp.Init(nil, "Key", f.Tag.Get("protobuf_key"), nil) + p.MapValProp = new(Properties) + p.MapValProp.Init(nil, "Value", f.Tag.Get("protobuf_val"), nil) + } } -var ( - propertiesMu sync.RWMutex - propertiesMap = make(map[reflect.Type]*StructProperties) -) +var propertiesCache sync.Map // map[reflect.Type]*StructProperties -// GetProperties returns the list of properties for the type represented by t. -// t must represent a generated struct type of a protocol message. +// GetProperties returns the list of properties for the type represented by t, +// which must be a generated protocol buffer message in the open-struct API, +// where protobuf message fields are represented by exported Go struct fields. +// +// Deprecated: Use protobuf reflection instead. func GetProperties(t reflect.Type) *StructProperties { - if t.Kind() != reflect.Struct { - panic("proto: type must have kind struct") - } - - // Most calls to GetProperties in a long-running program will be - // retrieving details for types we have seen before. - propertiesMu.RLock() - sprop, ok := propertiesMap[t] - propertiesMu.RUnlock() - if ok { - return sprop + if p, ok := propertiesCache.Load(t); ok { + return p.(*StructProperties) } - - propertiesMu.Lock() - sprop = getPropertiesLocked(t) - propertiesMu.Unlock() - return sprop + p, _ := propertiesCache.LoadOrStore(t, newProperties(t)) + return p.(*StructProperties) } -type ( - oneofFuncsIface interface { - XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) - } - oneofWrappersIface interface { - XXX_OneofWrappers() []interface{} - } -) - -// getPropertiesLocked requires that propertiesMu is held. -func getPropertiesLocked(t reflect.Type) *StructProperties { - if prop, ok := propertiesMap[t]; ok { - return prop +func newProperties(t reflect.Type) *StructProperties { + if t.Kind() != reflect.Struct { + panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t)) } + var hasOneof bool prop := new(StructProperties) - // in case of recursive protos, fill this in now. - propertiesMap[t] = prop - - // build properties - prop.Prop = make([]*Properties, t.NumField()) - prop.order = make([]int, t.NumField()) + // Construct a list of properties for each field in the struct. for i := 0; i < t.NumField(); i++ { - f := t.Field(i) p := new(Properties) - name := f.Name - p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) + f := t.Field(i) + tagField := f.Tag.Get("protobuf") + p.Init(f.Type, f.Name, tagField, &f) - oneof := f.Tag.Get("protobuf_oneof") // special case - if oneof != "" { - // Oneof fields don't use the traditional protobuf tag. - p.OrigName = oneof + tagOneof := f.Tag.Get("protobuf_oneof") + if tagOneof != "" { + hasOneof = true + p.OrigName = tagOneof } - prop.Prop[i] = p - prop.order[i] = i - if debug { - print(i, " ", f.Name, " ", t.String(), " ") - if p.Tag > 0 { - print(p.String()) - } - print("\n") + + // Rename unrelated struct fields with the "XXX_" prefix since so much + // user code simply checks for this to exclude special fields. 
+ if tagField == "" && tagOneof == "" && !strings.HasPrefix(p.Name, "XXX_") { + p.Name = "XXX_" + p.Name + p.OrigName = "XXX_" + p.OrigName + } else if p.Weak != "" { + p.Name = p.OrigName // avoid possible "XXX_" prefix on weak field } + + prop.Prop = append(prop.Prop, p) } - // Re-order prop.order. - sort.Sort(prop) + // Construct a mapping of oneof field names to properties. + if hasOneof { + var oneofWrappers []interface{} + if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofFuncs"); ok { + oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[3].Interface().([]interface{}) + } + if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofWrappers"); ok { + oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[0].Interface().([]interface{}) + } + if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(protoreflect.ProtoMessage); ok { + if m, ok := m.ProtoReflect().(interface{ ProtoMessageInfo() *protoimpl.MessageInfo }); ok { + oneofWrappers = m.ProtoMessageInfo().OneofWrappers + } + } - var oots []interface{} - switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { - case oneofFuncsIface: - _, _, _, oots = m.XXX_OneofFuncs() - case oneofWrappersIface: - oots = m.XXX_OneofWrappers() - } - if len(oots) > 0 { - // Interpret oneof metadata. prop.OneofTypes = make(map[string]*OneofProperties) - for _, oot := range oots { - oop := &OneofProperties{ - Type: reflect.ValueOf(oot).Type(), // *T + for _, wrapper := range oneofWrappers { + p := &OneofProperties{ + Type: reflect.ValueOf(wrapper).Type(), // *T Prop: new(Properties), } - sft := oop.Type.Elem().Field(0) - oop.Prop.Name = sft.Name - oop.Prop.Parse(sft.Tag.Get("protobuf")) - // There will be exactly one interface field that - // this new value is assignable to. - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - if f.Type.Kind() != reflect.Interface { - continue + f := p.Type.Elem().Field(0) + p.Prop.Name = f.Name + p.Prop.Parse(f.Tag.Get("protobuf")) + + // Determine the struct field that contains this oneof. + // Each wrapper is assignable to exactly one parent field. + var foundOneof bool + for i := 0; i < t.NumField() && !foundOneof; i++ { + if p.Type.AssignableTo(t.Field(i).Type) { + p.Field = i + foundOneof = true } - if !oop.Type.AssignableTo(f.Type) { - continue - } - oop.Field = i - break } - prop.OneofTypes[oop.Prop.OrigName] = oop - } - } - - // build required counts - // build tags - reqCount := 0 - prop.decoderOrigNames = make(map[string]int) - for i, p := range prop.Prop { - if strings.HasPrefix(p.Name, "XXX_") { - // Internal fields should not appear in tags/origNames maps. - // They are handled specially when encoding and decoding. - continue - } - if p.Required { - reqCount++ + if !foundOneof { + panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t)) + } + prop.OneofTypes[p.Prop.OrigName] = p } - prop.decoderTags.put(p.Tag, i) - prop.decoderOrigNames[p.OrigName] = i } - prop.reqCount = reqCount return prop } -// A global registry of enum types. -// The generated code will register the generated maps by calling RegisterEnum. - -var enumValueMaps = make(map[string]map[string]int32) - -// RegisterEnum is called from the generated code to install the enum descriptor -// maps into the global table to aid parsing text format protocol buffers. 
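A short sketch of the struct-field tag format that Parse consumes, using only exported Properties fields that are present in the rewritten file above:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	// Parse a protobuf struct-field tag of the style shown above,
	// e.g. protobuf:"bytes,49,opt,name=foo,def=hello!".
	p := new(proto.Properties)
	p.Parse("bytes,1,opt,name=metadata,json=metadata")

	fmt.Println(p.Wire, p.Tag, p.OrigName, p.JSONName) // bytes 1 metadata metadata
	fmt.Println(p.String())                            // round-trips back to a tag string
}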
-func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { - if _, ok := enumValueMaps[typeName]; ok { - panic("proto: duplicate enum registered: " + typeName) - } - enumValueMaps[typeName] = valueMap -} - -// EnumValueMap returns the mapping from names to integers of the -// enum type enumType, or a nil if not found. -func EnumValueMap(enumType string) map[string]int32 { - return enumValueMaps[enumType] -} - -// A registry of all linked message types. -// The string is a fully-qualified proto name ("pkg.Message"). -var ( - protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers - protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types - revProtoTypes = make(map[reflect.Type]string) -) - -// RegisterType is called from generated code and maps from the fully qualified -// proto name to the type (pointer to struct) of the protocol buffer. -func RegisterType(x Message, name string) { - if _, ok := protoTypedNils[name]; ok { - // TODO: Some day, make this a panic. - log.Printf("proto: duplicate proto type registered: %s", name) - return - } - t := reflect.TypeOf(x) - if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 { - // Generated code always calls RegisterType with nil x. - // This check is just for extra safety. - protoTypedNils[name] = x - } else { - protoTypedNils[name] = reflect.Zero(t).Interface().(Message) - } - revProtoTypes[t] = name -} - -// RegisterMapType is called from generated code and maps from the fully qualified -// proto name to the native map type of the proto map definition. -func RegisterMapType(x interface{}, name string) { - if reflect.TypeOf(x).Kind() != reflect.Map { - panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name)) - } - if _, ok := protoMapTypes[name]; ok { - log.Printf("proto: duplicate proto type registered: %s", name) - return - } - t := reflect.TypeOf(x) - protoMapTypes[name] = t - revProtoTypes[t] = name -} - -// MessageName returns the fully-qualified proto name for the given message type. -func MessageName(x Message) string { - type xname interface { - XXX_MessageName() string - } - if m, ok := x.(xname); ok { - return m.XXX_MessageName() - } - return revProtoTypes[reflect.TypeOf(x)] -} - -// MessageType returns the message type (pointer to struct) for a named message. -// The type is not guaranteed to implement proto.Message if the name refers to a -// map entry. -func MessageType(name string) reflect.Type { - if t, ok := protoTypedNils[name]; ok { - return reflect.TypeOf(t) - } - return protoMapTypes[name] -} - -// A registry of all linked proto files. -var ( - protoFiles = make(map[string][]byte) // file name => fileDescriptor -) - -// RegisterFile is called from generated code and maps from the -// full file name of a .proto file to its compressed FileDescriptorProto. -func RegisterFile(filename string, fileDescriptor []byte) { - protoFiles[filename] = fileDescriptor -} - -// FileDescriptor returns the compressed FileDescriptorProto for a .proto file. 
-func FileDescriptor(filename string) []byte { return protoFiles[filename] } +func (sp *StructProperties) Len() int { return len(sp.Prop) } +func (sp *StructProperties) Less(i, j int) bool { return false } +func (sp *StructProperties) Swap(i, j int) { return } diff --git a/vendor/github.com/golang/protobuf/proto/proto.go b/vendor/github.com/golang/protobuf/proto/proto.go new file mode 100644 index 000000000..5aee89c32 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/proto.go @@ -0,0 +1,167 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package proto provides functionality for handling protocol buffer messages. +// In particular, it provides marshaling and unmarshaling between a protobuf +// message and the binary wire format. +// +// See https://developers.google.com/protocol-buffers/docs/gotutorial for +// more information. +// +// Deprecated: Use the "google.golang.org/protobuf/proto" package instead. +package proto + +import ( + protoV2 "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + ProtoPackageIsVersion1 = true + ProtoPackageIsVersion2 = true + ProtoPackageIsVersion3 = true + ProtoPackageIsVersion4 = true +) + +// GeneratedEnum is any enum type generated by protoc-gen-go +// which is a named int32 kind. +// This type exists for documentation purposes. +type GeneratedEnum interface{} + +// GeneratedMessage is any message type generated by protoc-gen-go +// which is a pointer to a named struct kind. +// This type exists for documentation purposes. +type GeneratedMessage interface{} + +// Message is a protocol buffer message. +// +// This is the v1 version of the message interface and is marginally better +// than an empty interface as it lacks any method to programatically interact +// with the contents of the message. +// +// A v2 message is declared in "google.golang.org/protobuf/proto".Message and +// exposes protobuf reflection as a first-class feature of the interface. +// +// To convert a v1 message to a v2 message, use the MessageV2 function. +// To convert a v2 message to a v1 message, use the MessageV1 function. +type Message = protoiface.MessageV1 + +// MessageV1 converts either a v1 or v2 message to a v1 message. +// It returns nil if m is nil. +func MessageV1(m GeneratedMessage) protoiface.MessageV1 { + return protoimpl.X.ProtoMessageV1Of(m) +} + +// MessageV2 converts either a v1 or v2 message to a v2 message. +// It returns nil if m is nil. +func MessageV2(m GeneratedMessage) protoV2.Message { + return protoimpl.X.ProtoMessageV2Of(m) +} + +// MessageReflect returns a reflective view for a message. +// It returns nil if m is nil. +func MessageReflect(m Message) protoreflect.Message { + return protoimpl.X.MessageOf(m) +} + +// Marshaler is implemented by messages that can marshal themselves. +// This interface is used by the following functions: Size, Marshal, +// Buffer.Marshal, and Buffer.EncodeMessage. +// +// Deprecated: Do not implement. +type Marshaler interface { + // Marshal formats the encoded bytes of the message. + // It should be deterministic and emit valid protobuf wire data. + // The caller takes ownership of the returned buffer. + Marshal() ([]byte, error) +} + +// Unmarshaler is implemented by messages that can unmarshal themselves. 
+// This interface is used by the following functions: Unmarshal, UnmarshalMerge, +// Buffer.Unmarshal, Buffer.DecodeMessage, and Buffer.DecodeGroup. +// +// Deprecated: Do not implement. +type Unmarshaler interface { + // Unmarshal parses the encoded bytes of the protobuf wire input. + // The provided buffer is only valid for during method call. + // It should not reset the receiver message. + Unmarshal([]byte) error +} + +// Merger is implemented by messages that can merge themselves. +// This interface is used by the following functions: Clone and Merge. +// +// Deprecated: Do not implement. +type Merger interface { + // Merge merges the contents of src into the receiver message. + // It clones all data structures in src such that it aliases no mutable + // memory referenced by src. + Merge(src Message) +} + +// RequiredNotSetError is an error type returned when +// marshaling or unmarshaling a message with missing required fields. +type RequiredNotSetError struct { + err error +} + +func (e *RequiredNotSetError) Error() string { + if e.err != nil { + return e.err.Error() + } + return "proto: required field not set" +} +func (e *RequiredNotSetError) RequiredNotSet() bool { + return true +} + +func checkRequiredNotSet(m protoV2.Message) error { + if err := protoV2.CheckInitialized(m); err != nil { + return &RequiredNotSetError{err: err} + } + return nil +} + +// Clone returns a deep copy of src. +func Clone(src Message) Message { + return MessageV1(protoV2.Clone(MessageV2(src))) +} + +// Merge merges src into dst, which must be messages of the same type. +// +// Populated scalar fields in src are copied to dst, while populated +// singular messages in src are merged into dst by recursively calling Merge. +// The elements of every list field in src is appended to the corresponded +// list fields in dst. The entries of every map field in src is copied into +// the corresponding map field in dst, possibly replacing existing entries. +// The unknown fields of src are appended to the unknown fields of dst. +func Merge(dst, src Message) { + protoV2.Merge(MessageV2(dst), MessageV2(src)) +} + +// Equal reports whether two messages are equal. +// If two messages marshal to the same bytes under deterministic serialization, +// then Equal is guaranteed to report true. +// +// Two messages are equal if they are the same protobuf message type, +// have the same set of populated known and extension field values, +// and the same set of unknown fields values. +// +// Scalar values are compared with the equivalent of the == operator in Go, +// except bytes values which are compared using bytes.Equal and +// floating point values which specially treat NaNs as equal. +// Message values are compared by recursively calling Equal. +// Lists are equal if each element value is also equal. +// Maps are equal if they have the same set of keys, where the pair of values +// for each key is also equal. +func Equal(x, y Message) bool { + return protoV2.Equal(MessageV2(x), MessageV2(y)) +} + +func isMessageSet(md protoreflect.MessageDescriptor) bool { + ms, ok := md.(interface{ IsMessageSet() bool }) + return ok && ms.IsMessageSet() +} diff --git a/vendor/github.com/golang/protobuf/proto/registry.go b/vendor/github.com/golang/protobuf/proto/registry.go new file mode 100644 index 000000000..1e7ff6420 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/registry.go @@ -0,0 +1,323 @@ +// Copyright 2019 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "bytes" + "compress/gzip" + "fmt" + "io/ioutil" + "reflect" + "strings" + "sync" + + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/runtime/protoimpl" +) + +// filePath is the path to the proto source file. +type filePath = string // e.g., "google/protobuf/descriptor.proto" + +// fileDescGZIP is the compressed contents of the encoded FileDescriptorProto. +type fileDescGZIP = []byte + +var fileCache sync.Map // map[filePath]fileDescGZIP + +// RegisterFile is called from generated code to register the compressed +// FileDescriptorProto with the file path for a proto source file. +// +// Deprecated: Use protoregistry.GlobalFiles.RegisterFile instead. +func RegisterFile(s filePath, d fileDescGZIP) { + // Decompress the descriptor. + zr, err := gzip.NewReader(bytes.NewReader(d)) + if err != nil { + panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err)) + } + b, err := ioutil.ReadAll(zr) + if err != nil { + panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err)) + } + + // Construct a protoreflect.FileDescriptor from the raw descriptor. + // Note that DescBuilder.Build automatically registers the constructed + // file descriptor with the v2 registry. + protoimpl.DescBuilder{RawDescriptor: b}.Build() + + // Locally cache the raw descriptor form for the file. + fileCache.Store(s, d) +} + +// FileDescriptor returns the compressed FileDescriptorProto given the file path +// for a proto source file. It returns nil if not found. +// +// Deprecated: Use protoregistry.GlobalFiles.FindFileByPath instead. +func FileDescriptor(s filePath) fileDescGZIP { + if v, ok := fileCache.Load(s); ok { + return v.(fileDescGZIP) + } + + // Find the descriptor in the v2 registry. + var b []byte + if fd, _ := protoregistry.GlobalFiles.FindFileByPath(s); fd != nil { + if fd, ok := fd.(interface{ ProtoLegacyRawDesc() []byte }); ok { + b = fd.ProtoLegacyRawDesc() + } else { + // TODO: Use protodesc.ToFileDescriptorProto to construct + // a descriptorpb.FileDescriptorProto and marshal it. + // However, doing so causes the proto package to have a dependency + // on descriptorpb, leading to cyclic dependency issues. + } + } + + // Locally cache the raw descriptor form for the file. + if len(b) > 0 { + v, _ := fileCache.LoadOrStore(s, protoimpl.X.CompressGZIP(b)) + return v.(fileDescGZIP) + } + return nil +} + +// enumName is the name of an enum. For historical reasons, the enum name is +// neither the full Go name nor the full protobuf name of the enum. +// The name is the dot-separated combination of just the proto package that the +// enum is declared within followed by the Go type name of the generated enum. +type enumName = string // e.g., "my.proto.package.GoMessage_GoEnum" + +// enumsByName maps enum values by name to their numeric counterpart. +type enumsByName = map[string]int32 + +// enumsByNumber maps enum values by number to their name counterpart. +type enumsByNumber = map[int32]string + +var enumCache sync.Map // map[enumName]enumsByName +var numFilesCache sync.Map // map[protoreflect.FullName]int + +// RegisterEnum is called from the generated code to register the mapping of +// enum value names to enum numbers for the enum identified by s. +// +// Deprecated: Use protoregistry.GlobalTypes.RegisterEnum instead. 
+func RegisterEnum(s enumName, _ enumsByNumber, m enumsByName) { + if _, ok := enumCache.Load(s); ok { + panic("proto: duplicate enum registered: " + s) + } + enumCache.Store(s, m) + + // This does not forward registration to the v2 registry since this API + // lacks sufficient information to construct a complete v2 enum descriptor. +} + +// EnumValueMap returns the mapping from enum value names to enum numbers for +// the enum of the given name. It returns nil if not found. +// +// Deprecated: Use protoregistry.GlobalTypes.FindEnumByName instead. +func EnumValueMap(s enumName) enumsByName { + if v, ok := enumCache.Load(s); ok { + return v.(enumsByName) + } + + // Check whether the cache is stale. If the number of files in the current + // package differs, then it means that some enums may have been recently + // registered upstream that we do not know about. + var protoPkg protoreflect.FullName + if i := strings.LastIndexByte(s, '.'); i >= 0 { + protoPkg = protoreflect.FullName(s[:i]) + } + v, _ := numFilesCache.Load(protoPkg) + numFiles, _ := v.(int) + if protoregistry.GlobalFiles.NumFilesByPackage(protoPkg) == numFiles { + return nil // cache is up-to-date; was not found earlier + } + + // Update the enum cache for all enums declared in the given proto package. + numFiles = 0 + protoregistry.GlobalFiles.RangeFilesByPackage(protoPkg, func(fd protoreflect.FileDescriptor) bool { + walkEnums(fd, func(ed protoreflect.EnumDescriptor) { + name := protoimpl.X.LegacyEnumName(ed) + if _, ok := enumCache.Load(name); !ok { + m := make(enumsByName) + evs := ed.Values() + for i := evs.Len() - 1; i >= 0; i-- { + ev := evs.Get(i) + m[string(ev.Name())] = int32(ev.Number()) + } + enumCache.LoadOrStore(name, m) + } + }) + numFiles++ + return true + }) + numFilesCache.Store(protoPkg, numFiles) + + // Check cache again for enum map. + if v, ok := enumCache.Load(s); ok { + return v.(enumsByName) + } + return nil +} + +// walkEnums recursively walks all enums declared in d. +func walkEnums(d interface { + Enums() protoreflect.EnumDescriptors + Messages() protoreflect.MessageDescriptors +}, f func(protoreflect.EnumDescriptor)) { + eds := d.Enums() + for i := eds.Len() - 1; i >= 0; i-- { + f(eds.Get(i)) + } + mds := d.Messages() + for i := mds.Len() - 1; i >= 0; i-- { + walkEnums(mds.Get(i), f) + } +} + +// messageName is the full name of protobuf message. +type messageName = string + +var messageTypeCache sync.Map // map[messageName]reflect.Type + +// RegisterType is called from generated code to register the message Go type +// for a message of the given name. +// +// Deprecated: Use protoregistry.GlobalTypes.RegisterMessage instead. +func RegisterType(m Message, s messageName) { + mt := protoimpl.X.LegacyMessageTypeOf(m, protoreflect.FullName(s)) + if err := protoregistry.GlobalTypes.RegisterMessage(mt); err != nil { + panic(err) + } + messageTypeCache.Store(s, reflect.TypeOf(m)) +} + +// RegisterMapType is called from generated code to register the Go map type +// for a protobuf message representing a map entry. +// +// Deprecated: Do not use. +func RegisterMapType(m interface{}, s messageName) { + t := reflect.TypeOf(m) + if t.Kind() != reflect.Map { + panic(fmt.Sprintf("invalid map kind: %v", t)) + } + if _, ok := messageTypeCache.Load(s); ok { + panic(fmt.Errorf("proto: duplicate proto message registered: %s", s)) + } + messageTypeCache.Store(s, t) +} + +// MessageType returns the message type for a named message. +// It returns nil if not found. 
+// +// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead. +func MessageType(s messageName) reflect.Type { + if v, ok := messageTypeCache.Load(s); ok { + return v.(reflect.Type) + } + + // Derive the message type from the v2 registry. + var t reflect.Type + if mt, _ := protoregistry.GlobalTypes.FindMessageByName(protoreflect.FullName(s)); mt != nil { + t = messageGoType(mt) + } + + // If we could not get a concrete type, it is possible that it is a + // pseudo-message for a map entry. + if t == nil { + d, _ := protoregistry.GlobalFiles.FindDescriptorByName(protoreflect.FullName(s)) + if md, _ := d.(protoreflect.MessageDescriptor); md != nil && md.IsMapEntry() { + kt := goTypeForField(md.Fields().ByNumber(1)) + vt := goTypeForField(md.Fields().ByNumber(2)) + t = reflect.MapOf(kt, vt) + } + } + + // Locally cache the message type for the given name. + if t != nil { + v, _ := messageTypeCache.LoadOrStore(s, t) + return v.(reflect.Type) + } + return nil +} + +func goTypeForField(fd protoreflect.FieldDescriptor) reflect.Type { + switch k := fd.Kind(); k { + case protoreflect.EnumKind: + if et, _ := protoregistry.GlobalTypes.FindEnumByName(fd.Enum().FullName()); et != nil { + return enumGoType(et) + } + return reflect.TypeOf(protoreflect.EnumNumber(0)) + case protoreflect.MessageKind, protoreflect.GroupKind: + if mt, _ := protoregistry.GlobalTypes.FindMessageByName(fd.Message().FullName()); mt != nil { + return messageGoType(mt) + } + return reflect.TypeOf((*protoreflect.Message)(nil)).Elem() + default: + return reflect.TypeOf(fd.Default().Interface()) + } +} + +func enumGoType(et protoreflect.EnumType) reflect.Type { + return reflect.TypeOf(et.New(0)) +} + +func messageGoType(mt protoreflect.MessageType) reflect.Type { + return reflect.TypeOf(MessageV1(mt.Zero().Interface())) +} + +// MessageName returns the full protobuf name for the given message type. +// +// Deprecated: Use protoreflect.MessageDescriptor.FullName instead. +func MessageName(m Message) messageName { + if m == nil { + return "" + } + if m, ok := m.(interface{ XXX_MessageName() messageName }); ok { + return m.XXX_MessageName() + } + return messageName(protoimpl.X.MessageDescriptorOf(m).FullName()) +} + +// RegisterExtension is called from the generated code to register +// the extension descriptor. +// +// Deprecated: Use protoregistry.GlobalTypes.RegisterExtension instead. +func RegisterExtension(d *ExtensionDesc) { + if err := protoregistry.GlobalTypes.RegisterExtension(d); err != nil { + panic(err) + } +} + +type extensionsByNumber = map[int32]*ExtensionDesc + +var extensionCache sync.Map // map[messageName]extensionsByNumber + +// RegisteredExtensions returns a map of the registered extensions for the +// provided protobuf message, indexed by the extension field number. +// +// Deprecated: Use protoregistry.GlobalTypes.RangeExtensionsByMessage instead. +func RegisteredExtensions(m Message) extensionsByNumber { + // Check whether the cache is stale. If the number of extensions for + // the given message differs, then it means that some extensions were + // recently registered upstream that we do not know about. + s := MessageName(m) + v, _ := extensionCache.Load(s) + xs, _ := v.(extensionsByNumber) + if protoregistry.GlobalTypes.NumExtensionsByMessage(protoreflect.FullName(s)) == len(xs) { + return xs // cache is up-to-date + } + + // Cache is stale, re-compute the extensions map. 
+ xs = make(extensionsByNumber) + protoregistry.GlobalTypes.RangeExtensionsByMessage(protoreflect.FullName(s), func(xt protoreflect.ExtensionType) bool { + if xd, ok := xt.(*ExtensionDesc); ok { + xs[int32(xt.TypeDescriptor().Number())] = xd + } else { + // TODO: This implies that the protoreflect.ExtensionType is a + // custom type not generated by protoc-gen-go. We could try and + // convert the type to an ExtensionDesc. + } + return true + }) + extensionCache.Store(s, xs) + return xs +} diff --git a/vendor/github.com/golang/protobuf/proto/table_marshal.go b/vendor/github.com/golang/protobuf/proto/table_marshal.go deleted file mode 100644 index 5cb11fa95..000000000 --- a/vendor/github.com/golang/protobuf/proto/table_marshal.go +++ /dev/null @@ -1,2776 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "errors" - "fmt" - "math" - "reflect" - "sort" - "strconv" - "strings" - "sync" - "sync/atomic" - "unicode/utf8" -) - -// a sizer takes a pointer to a field and the size of its tag, computes the size of -// the encoded data. -type sizer func(pointer, int) int - -// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire format), -// marshals the field to the end of the slice, returns the slice and error (if any). -type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) - -// marshalInfo is the information used for marshaling a message. 
-type marshalInfo struct { - typ reflect.Type - fields []*marshalFieldInfo - unrecognized field // offset of XXX_unrecognized - extensions field // offset of XXX_InternalExtensions - v1extensions field // offset of XXX_extensions - sizecache field // offset of XXX_sizecache - initialized int32 // 0 -- only typ is set, 1 -- fully initialized - messageset bool // uses message set wire format - hasmarshaler bool // has custom marshaler - sync.RWMutex // protect extElems map, also for initialization - extElems map[int32]*marshalElemInfo // info of extension elements -} - -// marshalFieldInfo is the information used for marshaling a field of a message. -type marshalFieldInfo struct { - field field - wiretag uint64 // tag in wire format - tagsize int // size of tag in wire format - sizer sizer - marshaler marshaler - isPointer bool - required bool // field is required - name string // name of the field, for error reporting - oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements -} - -// marshalElemInfo is the information used for marshaling an extension or oneof element. -type marshalElemInfo struct { - wiretag uint64 // tag in wire format - tagsize int // size of tag in wire format - sizer sizer - marshaler marshaler - isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only) - deref bool // dereference the pointer before operating on it; implies isptr -} - -var ( - marshalInfoMap = map[reflect.Type]*marshalInfo{} - marshalInfoLock sync.Mutex -) - -// getMarshalInfo returns the information to marshal a given type of message. -// The info it returns may not necessarily initialized. -// t is the type of the message (NOT the pointer to it). -func getMarshalInfo(t reflect.Type) *marshalInfo { - marshalInfoLock.Lock() - u, ok := marshalInfoMap[t] - if !ok { - u = &marshalInfo{typ: t} - marshalInfoMap[t] = u - } - marshalInfoLock.Unlock() - return u -} - -// Size is the entry point from generated code, -// and should be ONLY called by generated code. -// It computes the size of encoded data of msg. -// a is a pointer to a place to store cached marshal info. -func (a *InternalMessageInfo) Size(msg Message) int { - u := getMessageMarshalInfo(msg, a) - ptr := toPointer(&msg) - if ptr.isNil() { - // We get here if msg is a typed nil ((*SomeMessage)(nil)), - // so it satisfies the interface, and msg == nil wouldn't - // catch it. We don't want crash in this case. - return 0 - } - return u.size(ptr) -} - -// Marshal is the entry point from generated code, -// and should be ONLY called by generated code. -// It marshals msg to the end of b. -// a is a pointer to a place to store cached marshal info. -func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) { - u := getMessageMarshalInfo(msg, a) - ptr := toPointer(&msg) - if ptr.isNil() { - // We get here if msg is a typed nil ((*SomeMessage)(nil)), - // so it satisfies the interface, and msg == nil wouldn't - // catch it. We don't want crash in this case. - return b, ErrNil - } - return u.marshal(b, ptr, deterministic) -} - -func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo { - // u := a.marshal, but atomically. - // We use an atomic here to ensure memory consistency. - u := atomicLoadMarshalInfo(&a.marshal) - if u == nil { - // Get marshal information from type of message. 
- t := reflect.ValueOf(msg).Type() - if t.Kind() != reflect.Ptr { - panic(fmt.Sprintf("cannot handle non-pointer message type %v", t)) - } - u = getMarshalInfo(t.Elem()) - // Store it in the cache for later users. - // a.marshal = u, but atomically. - atomicStoreMarshalInfo(&a.marshal, u) - } - return u -} - -// size is the main function to compute the size of the encoded data of a message. -// ptr is the pointer to the message. -func (u *marshalInfo) size(ptr pointer) int { - if atomic.LoadInt32(&u.initialized) == 0 { - u.computeMarshalInfo() - } - - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - if u.hasmarshaler { - m := ptr.asPointerTo(u.typ).Interface().(Marshaler) - b, _ := m.Marshal() - return len(b) - } - - n := 0 - for _, f := range u.fields { - if f.isPointer && ptr.offset(f.field).getPointer().isNil() { - // nil pointer always marshals to nothing - continue - } - n += f.sizer(ptr.offset(f.field), f.tagsize) - } - if u.extensions.IsValid() { - e := ptr.offset(u.extensions).toExtensions() - if u.messageset { - n += u.sizeMessageSet(e) - } else { - n += u.sizeExtensions(e) - } - } - if u.v1extensions.IsValid() { - m := *ptr.offset(u.v1extensions).toOldExtensions() - n += u.sizeV1Extensions(m) - } - if u.unrecognized.IsValid() { - s := *ptr.offset(u.unrecognized).toBytes() - n += len(s) - } - // cache the result for use in marshal - if u.sizecache.IsValid() { - atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n)) - } - return n -} - -// cachedsize gets the size from cache. If there is no cache (i.e. message is not generated), -// fall back to compute the size. -func (u *marshalInfo) cachedsize(ptr pointer) int { - if u.sizecache.IsValid() { - return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32())) - } - return u.size(ptr) -} - -// marshal is the main function to marshal a message. It takes a byte slice and appends -// the encoded data to the end of the slice, returns the slice and error (if any). -// ptr is the pointer to the message. -// If deterministic is true, map is marshaled in deterministic order. -func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) { - if atomic.LoadInt32(&u.initialized) == 0 { - u.computeMarshalInfo() - } - - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - if u.hasmarshaler { - m := ptr.asPointerTo(u.typ).Interface().(Marshaler) - b1, err := m.Marshal() - b = append(b, b1...) - return b, err - } - - var err, errLater error - // The old marshaler encodes extensions at beginning. - if u.extensions.IsValid() { - e := ptr.offset(u.extensions).toExtensions() - if u.messageset { - b, err = u.appendMessageSet(b, e, deterministic) - } else { - b, err = u.appendExtensions(b, e, deterministic) - } - if err != nil { - return b, err - } - } - if u.v1extensions.IsValid() { - m := *ptr.offset(u.v1extensions).toOldExtensions() - b, err = u.appendV1Extensions(b, m, deterministic) - if err != nil { - return b, err - } - } - for _, f := range u.fields { - if f.required { - if ptr.offset(f.field).getPointer().isNil() { - // Required field is not set. - // We record the error but keep going, to give a complete marshaling. 
- if errLater == nil { - errLater = &RequiredNotSetError{f.name} - } - continue - } - } - if f.isPointer && ptr.offset(f.field).getPointer().isNil() { - // nil pointer always marshals to nothing - continue - } - b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic) - if err != nil { - if err1, ok := err.(*RequiredNotSetError); ok { - // Required field in submessage is not set. - // We record the error but keep going, to give a complete marshaling. - if errLater == nil { - errLater = &RequiredNotSetError{f.name + "." + err1.field} - } - continue - } - if err == errRepeatedHasNil { - err = errors.New("proto: repeated field " + f.name + " has nil element") - } - if err == errInvalidUTF8 { - if errLater == nil { - fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name - errLater = &invalidUTF8Error{fullName} - } - continue - } - return b, err - } - } - if u.unrecognized.IsValid() { - s := *ptr.offset(u.unrecognized).toBytes() - b = append(b, s...) - } - return b, errLater -} - -// computeMarshalInfo initializes the marshal info. -func (u *marshalInfo) computeMarshalInfo() { - u.Lock() - defer u.Unlock() - if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock - return - } - - t := u.typ - u.unrecognized = invalidField - u.extensions = invalidField - u.v1extensions = invalidField - u.sizecache = invalidField - - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - if reflect.PtrTo(t).Implements(marshalerType) { - u.hasmarshaler = true - atomic.StoreInt32(&u.initialized, 1) - return - } - - // get oneof implementers - var oneofImplementers []interface{} - switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { - case oneofFuncsIface: - _, _, _, oneofImplementers = m.XXX_OneofFuncs() - case oneofWrappersIface: - oneofImplementers = m.XXX_OneofWrappers() - } - - n := t.NumField() - - // deal with XXX fields first - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - if !strings.HasPrefix(f.Name, "XXX_") { - continue - } - switch f.Name { - case "XXX_sizecache": - u.sizecache = toField(&f) - case "XXX_unrecognized": - u.unrecognized = toField(&f) - case "XXX_InternalExtensions": - u.extensions = toField(&f) - u.messageset = f.Tag.Get("protobuf_messageset") == "1" - case "XXX_extensions": - u.v1extensions = toField(&f) - case "XXX_NoUnkeyedLiteral": - // nothing to do - default: - panic("unknown XXX field: " + f.Name) - } - n-- - } - - // normal fields - fields := make([]marshalFieldInfo, n) // batch allocation - u.fields = make([]*marshalFieldInfo, 0, n) - for i, j := 0, 0; i < t.NumField(); i++ { - f := t.Field(i) - - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - field := &fields[j] - j++ - field.name = f.Name - u.fields = append(u.fields, field) - if f.Tag.Get("protobuf_oneof") != "" { - field.computeOneofFieldInfo(&f, oneofImplementers) - continue - } - if f.Tag.Get("protobuf") == "" { - // field has no tag (not in generated message), ignore it - u.fields = u.fields[:len(u.fields)-1] - j-- - continue - } - field.computeMarshalFieldInfo(&f) - } - - // fields are marshaled in tag order on the wire. 
- sort.Sort(byTag(u.fields)) - - atomic.StoreInt32(&u.initialized, 1) -} - -// helper for sorting fields by tag -type byTag []*marshalFieldInfo - -func (a byTag) Len() int { return len(a) } -func (a byTag) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag } - -// getExtElemInfo returns the information to marshal an extension element. -// The info it returns is initialized. -func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo { - // get from cache first - u.RLock() - e, ok := u.extElems[desc.Field] - u.RUnlock() - if ok { - return e - } - - t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct - tags := strings.Split(desc.Tag, ",") - tag, err := strconv.Atoi(tags[1]) - if err != nil { - panic("tag is not an integer") - } - wt := wiretype(tags[0]) - if t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct { - t = t.Elem() - } - sizer, marshaler := typeMarshaler(t, tags, false, false) - var deref bool - if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { - t = reflect.PtrTo(t) - deref = true - } - e = &marshalElemInfo{ - wiretag: uint64(tag)<<3 | wt, - tagsize: SizeVarint(uint64(tag) << 3), - sizer: sizer, - marshaler: marshaler, - isptr: t.Kind() == reflect.Ptr, - deref: deref, - } - - // update cache - u.Lock() - if u.extElems == nil { - u.extElems = make(map[int32]*marshalElemInfo) - } - u.extElems[desc.Field] = e - u.Unlock() - return e -} - -// computeMarshalFieldInfo fills up the information to marshal a field. -func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) { - // parse protobuf tag of the field. - // tag has format of "bytes,49,opt,name=foo,def=hello!" - tags := strings.Split(f.Tag.Get("protobuf"), ",") - if tags[0] == "" { - return - } - tag, err := strconv.Atoi(tags[1]) - if err != nil { - panic("tag is not an integer") - } - wt := wiretype(tags[0]) - if tags[2] == "req" { - fi.required = true - } - fi.setTag(f, tag, wt) - fi.setMarshaler(f, tags) -} - -func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) { - fi.field = toField(f) - fi.wiretag = math.MaxInt32 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire. - fi.isPointer = true - fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f) - fi.oneofElems = make(map[reflect.Type]*marshalElemInfo) - - ityp := f.Type // interface type - for _, o := range oneofImplementers { - t := reflect.TypeOf(o) - if !t.Implements(ityp) { - continue - } - sf := t.Elem().Field(0) // oneof implementer is a struct with a single field - tags := strings.Split(sf.Tag.Get("protobuf"), ",") - tag, err := strconv.Atoi(tags[1]) - if err != nil { - panic("tag is not an integer") - } - wt := wiretype(tags[0]) - sizer, marshaler := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value - fi.oneofElems[t.Elem()] = &marshalElemInfo{ - wiretag: uint64(tag)<<3 | wt, - tagsize: SizeVarint(uint64(tag) << 3), - sizer: sizer, - marshaler: marshaler, - } - } -} - -// wiretype returns the wire encoding of the type. 
-func wiretype(encoding string) uint64 { - switch encoding { - case "fixed32": - return WireFixed32 - case "fixed64": - return WireFixed64 - case "varint", "zigzag32", "zigzag64": - return WireVarint - case "bytes": - return WireBytes - case "group": - return WireStartGroup - } - panic("unknown wire type " + encoding) -} - -// setTag fills up the tag (in wire format) and its size in the info of a field. -func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) { - fi.field = toField(f) - fi.wiretag = uint64(tag)<<3 | wt - fi.tagsize = SizeVarint(uint64(tag) << 3) -} - -// setMarshaler fills up the sizer and marshaler in the info of a field. -func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) { - switch f.Type.Kind() { - case reflect.Map: - // map field - fi.isPointer = true - fi.sizer, fi.marshaler = makeMapMarshaler(f) - return - case reflect.Ptr, reflect.Slice: - fi.isPointer = true - } - fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false) -} - -// typeMarshaler returns the sizer and marshaler of a given field. -// t is the type of the field. -// tags is the generated "protobuf" tag of the field. -// If nozero is true, zero value is not marshaled to the wire. -// If oneof is true, it is a oneof field. -func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) { - encoding := tags[0] - - pointer := false - slice := false - if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { - slice = true - t = t.Elem() - } - if t.Kind() == reflect.Ptr { - pointer = true - t = t.Elem() - } - - packed := false - proto3 := false - validateUTF8 := true - for i := 2; i < len(tags); i++ { - if tags[i] == "packed" { - packed = true - } - if tags[i] == "proto3" { - proto3 = true - } - } - validateUTF8 = validateUTF8 && proto3 - - switch t.Kind() { - case reflect.Bool: - if pointer { - return sizeBoolPtr, appendBoolPtr - } - if slice { - if packed { - return sizeBoolPackedSlice, appendBoolPackedSlice - } - return sizeBoolSlice, appendBoolSlice - } - if nozero { - return sizeBoolValueNoZero, appendBoolValueNoZero - } - return sizeBoolValue, appendBoolValue - case reflect.Uint32: - switch encoding { - case "fixed32": - if pointer { - return sizeFixed32Ptr, appendFixed32Ptr - } - if slice { - if packed { - return sizeFixed32PackedSlice, appendFixed32PackedSlice - } - return sizeFixed32Slice, appendFixed32Slice - } - if nozero { - return sizeFixed32ValueNoZero, appendFixed32ValueNoZero - } - return sizeFixed32Value, appendFixed32Value - case "varint": - if pointer { - return sizeVarint32Ptr, appendVarint32Ptr - } - if slice { - if packed { - return sizeVarint32PackedSlice, appendVarint32PackedSlice - } - return sizeVarint32Slice, appendVarint32Slice - } - if nozero { - return sizeVarint32ValueNoZero, appendVarint32ValueNoZero - } - return sizeVarint32Value, appendVarint32Value - } - case reflect.Int32: - switch encoding { - case "fixed32": - if pointer { - return sizeFixedS32Ptr, appendFixedS32Ptr - } - if slice { - if packed { - return sizeFixedS32PackedSlice, appendFixedS32PackedSlice - } - return sizeFixedS32Slice, appendFixedS32Slice - } - if nozero { - return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero - } - return sizeFixedS32Value, appendFixedS32Value - case "varint": - if pointer { - return sizeVarintS32Ptr, appendVarintS32Ptr - } - if slice { - if packed { - return sizeVarintS32PackedSlice, appendVarintS32PackedSlice - } - return sizeVarintS32Slice, appendVarintS32Slice - } - if nozero { - 
return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero - } - return sizeVarintS32Value, appendVarintS32Value - case "zigzag32": - if pointer { - return sizeZigzag32Ptr, appendZigzag32Ptr - } - if slice { - if packed { - return sizeZigzag32PackedSlice, appendZigzag32PackedSlice - } - return sizeZigzag32Slice, appendZigzag32Slice - } - if nozero { - return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero - } - return sizeZigzag32Value, appendZigzag32Value - } - case reflect.Uint64: - switch encoding { - case "fixed64": - if pointer { - return sizeFixed64Ptr, appendFixed64Ptr - } - if slice { - if packed { - return sizeFixed64PackedSlice, appendFixed64PackedSlice - } - return sizeFixed64Slice, appendFixed64Slice - } - if nozero { - return sizeFixed64ValueNoZero, appendFixed64ValueNoZero - } - return sizeFixed64Value, appendFixed64Value - case "varint": - if pointer { - return sizeVarint64Ptr, appendVarint64Ptr - } - if slice { - if packed { - return sizeVarint64PackedSlice, appendVarint64PackedSlice - } - return sizeVarint64Slice, appendVarint64Slice - } - if nozero { - return sizeVarint64ValueNoZero, appendVarint64ValueNoZero - } - return sizeVarint64Value, appendVarint64Value - } - case reflect.Int64: - switch encoding { - case "fixed64": - if pointer { - return sizeFixedS64Ptr, appendFixedS64Ptr - } - if slice { - if packed { - return sizeFixedS64PackedSlice, appendFixedS64PackedSlice - } - return sizeFixedS64Slice, appendFixedS64Slice - } - if nozero { - return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero - } - return sizeFixedS64Value, appendFixedS64Value - case "varint": - if pointer { - return sizeVarintS64Ptr, appendVarintS64Ptr - } - if slice { - if packed { - return sizeVarintS64PackedSlice, appendVarintS64PackedSlice - } - return sizeVarintS64Slice, appendVarintS64Slice - } - if nozero { - return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero - } - return sizeVarintS64Value, appendVarintS64Value - case "zigzag64": - if pointer { - return sizeZigzag64Ptr, appendZigzag64Ptr - } - if slice { - if packed { - return sizeZigzag64PackedSlice, appendZigzag64PackedSlice - } - return sizeZigzag64Slice, appendZigzag64Slice - } - if nozero { - return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero - } - return sizeZigzag64Value, appendZigzag64Value - } - case reflect.Float32: - if pointer { - return sizeFloat32Ptr, appendFloat32Ptr - } - if slice { - if packed { - return sizeFloat32PackedSlice, appendFloat32PackedSlice - } - return sizeFloat32Slice, appendFloat32Slice - } - if nozero { - return sizeFloat32ValueNoZero, appendFloat32ValueNoZero - } - return sizeFloat32Value, appendFloat32Value - case reflect.Float64: - if pointer { - return sizeFloat64Ptr, appendFloat64Ptr - } - if slice { - if packed { - return sizeFloat64PackedSlice, appendFloat64PackedSlice - } - return sizeFloat64Slice, appendFloat64Slice - } - if nozero { - return sizeFloat64ValueNoZero, appendFloat64ValueNoZero - } - return sizeFloat64Value, appendFloat64Value - case reflect.String: - if validateUTF8 { - if pointer { - return sizeStringPtr, appendUTF8StringPtr - } - if slice { - return sizeStringSlice, appendUTF8StringSlice - } - if nozero { - return sizeStringValueNoZero, appendUTF8StringValueNoZero - } - return sizeStringValue, appendUTF8StringValue - } - if pointer { - return sizeStringPtr, appendStringPtr - } - if slice { - return sizeStringSlice, appendStringSlice - } - if nozero { - return sizeStringValueNoZero, appendStringValueNoZero - } - return sizeStringValue, appendStringValue - case 
reflect.Slice: - if slice { - return sizeBytesSlice, appendBytesSlice - } - if oneof { - // Oneof bytes field may also have "proto3" tag. - // We want to marshal it as a oneof field. Do this - // check before the proto3 check. - return sizeBytesOneof, appendBytesOneof - } - if proto3 { - return sizeBytes3, appendBytes3 - } - return sizeBytes, appendBytes - case reflect.Struct: - switch encoding { - case "group": - if slice { - return makeGroupSliceMarshaler(getMarshalInfo(t)) - } - return makeGroupMarshaler(getMarshalInfo(t)) - case "bytes": - if slice { - return makeMessageSliceMarshaler(getMarshalInfo(t)) - } - return makeMessageMarshaler(getMarshalInfo(t)) - } - } - panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding)) -} - -// Below are functions to size/marshal a specific type of a field. -// They are stored in the field's info, and called by function pointers. -// They have type sizer or marshaler. - -func sizeFixed32Value(_ pointer, tagsize int) int { - return 4 + tagsize -} -func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint32() - if v == 0 { - return 0 - } - return 4 + tagsize -} -func sizeFixed32Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint32Ptr() - if p == nil { - return 0 - } - return 4 + tagsize -} -func sizeFixed32Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - return (4 + tagsize) * len(s) -} -func sizeFixed32PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return 0 - } - return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize -} -func sizeFixedS32Value(_ pointer, tagsize int) int { - return 4 + tagsize -} -func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - if v == 0 { - return 0 - } - return 4 + tagsize -} -func sizeFixedS32Ptr(ptr pointer, tagsize int) int { - p := ptr.getInt32Ptr() - if p == nil { - return 0 - } - return 4 + tagsize -} -func sizeFixedS32Slice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - return (4 + tagsize) * len(s) -} -func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - if len(s) == 0 { - return 0 - } - return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize -} -func sizeFloat32Value(_ pointer, tagsize int) int { - return 4 + tagsize -} -func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int { - v := math.Float32bits(*ptr.toFloat32()) - if v == 0 { - return 0 - } - return 4 + tagsize -} -func sizeFloat32Ptr(ptr pointer, tagsize int) int { - p := *ptr.toFloat32Ptr() - if p == nil { - return 0 - } - return 4 + tagsize -} -func sizeFloat32Slice(ptr pointer, tagsize int) int { - s := *ptr.toFloat32Slice() - return (4 + tagsize) * len(s) -} -func sizeFloat32PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toFloat32Slice() - if len(s) == 0 { - return 0 - } - return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize -} -func sizeFixed64Value(_ pointer, tagsize int) int { - return 8 + tagsize -} -func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint64() - if v == 0 { - return 0 - } - return 8 + tagsize -} -func sizeFixed64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint64Ptr() - if p == nil { - return 0 - } - return 8 + tagsize -} -func sizeFixed64Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - return (8 + tagsize) * len(s) -} -func sizeFixed64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return 0 - } - return 8*len(s) + 
SizeVarint(uint64(8*len(s))) + tagsize -} -func sizeFixedS64Value(_ pointer, tagsize int) int { - return 8 + tagsize -} -func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - if v == 0 { - return 0 - } - return 8 + tagsize -} -func sizeFixedS64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toInt64Ptr() - if p == nil { - return 0 - } - return 8 + tagsize -} -func sizeFixedS64Slice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - return (8 + tagsize) * len(s) -} -func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return 0 - } - return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize -} -func sizeFloat64Value(_ pointer, tagsize int) int { - return 8 + tagsize -} -func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int { - v := math.Float64bits(*ptr.toFloat64()) - if v == 0 { - return 0 - } - return 8 + tagsize -} -func sizeFloat64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toFloat64Ptr() - if p == nil { - return 0 - } - return 8 + tagsize -} -func sizeFloat64Slice(ptr pointer, tagsize int) int { - s := *ptr.toFloat64Slice() - return (8 + tagsize) * len(s) -} -func sizeFloat64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toFloat64Slice() - if len(s) == 0 { - return 0 - } - return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize -} -func sizeVarint32Value(ptr pointer, tagsize int) int { - v := *ptr.toUint32() - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint32() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarint32Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint32Ptr() - if p == nil { - return 0 - } - return SizeVarint(uint64(*p)) + tagsize -} -func sizeVarint32Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) + tagsize - } - return n -} -func sizeVarint32PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeVarintS32Value(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS32Ptr(ptr pointer, tagsize int) int { - p := ptr.getInt32Ptr() - if p == nil { - return 0 - } - return SizeVarint(uint64(*p)) + tagsize -} -func sizeVarintS32Slice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) + tagsize - } - return n -} -func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeVarint64Value(ptr pointer, tagsize int) int { - v := *ptr.toUint64() - return SizeVarint(v) + tagsize -} -func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint64() - if v == 0 { - return 0 - } - return SizeVarint(v) + tagsize -} -func sizeVarint64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint64Ptr() - if p == nil { - return 0 - } - return SizeVarint(*p) + tagsize -} -func sizeVarint64Slice(ptr pointer, tagsize int) int { - s := 
*ptr.toUint64Slice() - n := 0 - for _, v := range s { - n += SizeVarint(v) + tagsize - } - return n -} -func sizeVarint64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(v) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeVarintS64Value(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toInt64Ptr() - if p == nil { - return 0 - } - return SizeVarint(uint64(*p)) + tagsize -} -func sizeVarintS64Slice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) + tagsize - } - return n -} -func sizeVarintS64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeZigzag32Value(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize -} -func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - if v == 0 { - return 0 - } - return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize -} -func sizeZigzag32Ptr(ptr pointer, tagsize int) int { - p := ptr.getInt32Ptr() - if p == nil { - return 0 - } - v := *p - return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize -} -func sizeZigzag32Slice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize - } - return n -} -func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeZigzag64Value(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize -} -func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize -} -func sizeZigzag64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toInt64Ptr() - if p == nil { - return 0 - } - v := *p - return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize -} -func sizeZigzag64Slice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize - } - return n -} -func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeBoolValue(_ pointer, tagsize int) int { - return 1 + tagsize -} -func sizeBoolValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toBool() - if !v { - return 0 - } - return 1 + tagsize -} -func sizeBoolPtr(ptr pointer, tagsize int) int { - p := *ptr.toBoolPtr() - if p == nil { - return 0 - } - return 1 + tagsize -} -func 
sizeBoolSlice(ptr pointer, tagsize int) int { - s := *ptr.toBoolSlice() - return (1 + tagsize) * len(s) -} -func sizeBoolPackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toBoolSlice() - if len(s) == 0 { - return 0 - } - return len(s) + SizeVarint(uint64(len(s))) + tagsize -} -func sizeStringValue(ptr pointer, tagsize int) int { - v := *ptr.toString() - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeStringValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toString() - if v == "" { - return 0 - } - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeStringPtr(ptr pointer, tagsize int) int { - p := *ptr.toStringPtr() - if p == nil { - return 0 - } - v := *p - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeStringSlice(ptr pointer, tagsize int) int { - s := *ptr.toStringSlice() - n := 0 - for _, v := range s { - n += len(v) + SizeVarint(uint64(len(v))) + tagsize - } - return n -} -func sizeBytes(ptr pointer, tagsize int) int { - v := *ptr.toBytes() - if v == nil { - return 0 - } - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeBytes3(ptr pointer, tagsize int) int { - v := *ptr.toBytes() - if len(v) == 0 { - return 0 - } - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeBytesOneof(ptr pointer, tagsize int) int { - v := *ptr.toBytes() - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeBytesSlice(ptr pointer, tagsize int) int { - s := *ptr.toBytesSlice() - n := 0 - for _, v := range s { - n += len(v) + SizeVarint(uint64(len(v))) + tagsize - } - return n -} - -// appendFixed32 appends an encoded fixed32 to b. -func appendFixed32(b []byte, v uint32) []byte { - b = append(b, - byte(v), - byte(v>>8), - byte(v>>16), - byte(v>>24)) - return b -} - -// appendFixed64 appends an encoded fixed64 to b. -func appendFixed64(b []byte, v uint64) []byte { - b = append(b, - byte(v), - byte(v>>8), - byte(v>>16), - byte(v>>24), - byte(v>>32), - byte(v>>40), - byte(v>>48), - byte(v>>56)) - return b -} - -// appendVarint appends an encoded varint to b. -func appendVarint(b []byte, v uint64) []byte { - // TODO: make 1-byte (maybe 2-byte) case inline-able, once we - // have non-leaf inliner. 
- switch { - case v < 1<<7: - b = append(b, byte(v)) - case v < 1<<14: - b = append(b, - byte(v&0x7f|0x80), - byte(v>>7)) - case v < 1<<21: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte(v>>14)) - case v < 1<<28: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte(v>>21)) - case v < 1<<35: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte(v>>28)) - case v < 1<<42: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte(v>>35)) - case v < 1<<49: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte(v>>42)) - case v < 1<<56: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte((v>>42)&0x7f|0x80), - byte(v>>49)) - case v < 1<<63: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte((v>>42)&0x7f|0x80), - byte((v>>49)&0x7f|0x80), - byte(v>>56)) - default: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte((v>>42)&0x7f|0x80), - byte((v>>49)&0x7f|0x80), - byte((v>>56)&0x7f|0x80), - 1) - } - return b -} - -func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, *p) - return b, nil -} -func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - } - return b, nil -} -func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(4*len(s))) - for _, v := range s { - b = appendFixed32(b, v) - } - return b, nil -} -func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(v)) - return b, nil -} -func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(v)) - return b, nil -} -func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := ptr.getInt32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(*p)) - return b, nil -} -func appendFixedS32Slice(b []byte, ptr pointer, 
wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(v)) - } - return b, nil -} -func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(4*len(s))) - for _, v := range s { - b = appendFixed32(b, uint32(v)) - } - return b, nil -} -func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float32bits(*ptr.toFloat32()) - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float32bits(*ptr.toFloat32()) - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toFloat32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, math.Float32bits(*p)) - return b, nil -} -func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed32(b, math.Float32bits(v)) - } - return b, nil -} -func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(4*len(s))) - for _, v := range s { - b = appendFixed32(b, math.Float32bits(v)) - } - return b, nil -} -func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, *p) - return b, nil -} -func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - } - return b, nil -} -func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(8*len(s))) - for _, v := range s { - b = appendFixed64(b, v) - } - return b, nil -} -func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(v)) - return b, nil -} -func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(v)) - return b, nil -} -func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toInt64Ptr() - if p == nil { - return b, 
nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(*p)) - return b, nil -} -func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(v)) - } - return b, nil -} -func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(8*len(s))) - for _, v := range s { - b = appendFixed64(b, uint64(v)) - } - return b, nil -} -func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float64bits(*ptr.toFloat64()) - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float64bits(*ptr.toFloat64()) - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toFloat64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, math.Float64bits(*p)) - return b, nil -} -func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed64(b, math.Float64bits(v)) - } - return b, nil -} -func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(8*len(s))) - for _, v := range s { - b = appendFixed64(b, math.Float64bits(v)) - } - return b, nil -} -func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(*p)) - return b, nil -} -func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() 
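
The various *PackedSlice marshalers above all emit the same framing: the field key rewritten to wire type 2 (length-delimited), a byte count, then the raw elements with no per-element keys. A small standalone sketch for fixed32 elements (helper names are illustrative, not from the library):

package main

import (
	"encoding/binary"
	"fmt"
)

const wireBytes = 2 // length-delimited wire type

// appendFixed32Packed emits a packed repeated fixed32 field: one key with
// wire type 2, the payload length in bytes, then the little-endian elements.
// For brevity it assumes fieldNum < 16 and a payload shorter than 128 bytes,
// so both the key and the length fit in a single byte.
func appendFixed32Packed(b []byte, fieldNum int, s []uint32) []byte {
	if len(s) == 0 {
		return b
	}
	b = append(b, byte(fieldNum<<3|wireBytes))
	b = append(b, byte(4*len(s)))
	var tmp [4]byte
	for _, v := range s {
		binary.LittleEndian.PutUint32(tmp[:], v)
		b = append(b, tmp[:]...)
	}
	return b
}

func main() {
	fmt.Printf("% x\n", appendFixed32Packed(nil, 4, []uint32{1, 2}))
	// 22 08 01 00 00 00 02 00 00 00
}
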
- if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := ptr.getInt32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(*p)) - return b, nil -} -func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - b = appendVarint(b, wiretag) - b = appendVarint(b, v) - return b, nil -} -func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, v) - return b, nil -} -func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, *p) - return b, nil -} -func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, v) - } - return b, nil -} -func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(v) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, v) - } - return b, nil -} -func appendVarintS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toInt64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(*p)) - return b, nil -} -func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func 
appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - return b, nil -} -func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - return b, nil -} -func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := ptr.getInt32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - v := *p - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - return b, nil -} -func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - } - return b, nil -} -func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - } - return b, nil -} -func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - return b, nil -} -func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - return b, nil -} -func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toInt64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - v := *p - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - return b, nil -} -func appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - } - return b, nil -} -func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - } - return b, nil -} -func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBool() - b = appendVarint(b, wiretag) - if v { - b = append(b, 1) - } else { - b = append(b, 0) - } - return b, nil -} -func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBool() - if !v { - return b, nil - } - b = appendVarint(b, wiretag) - b = append(b, 1) - return b, nil -} - -func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toBoolPtr() - if p == nil { - 
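
The appendZigzag* helpers above apply the zigzag mapping so that small negative sint32/sint64 values still encode as short varints. A standalone round-trip sketch:

package main

import "fmt"

// zigzag32 maps signed values to unsigned ones so numbers close to zero,
// negative or positive, encode as short varints: 0→0, -1→1, 1→2, -2→3, …
func zigzag32(v int32) uint32 { return (uint32(v) << 1) ^ uint32(v>>31) }

// unzigzag32 is the inverse mapping used on the unmarshal side.
func unzigzag32(u uint32) int32 { return int32(u>>1) ^ -int32(u&1) }

func main() {
	for _, v := range []int32{0, -1, 1, -2, 64, -64} {
		u := zigzag32(v)
		fmt.Println(v, "->", u, "->", unzigzag32(u))
	}
}
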
return b, nil - } - b = appendVarint(b, wiretag) - if *p { - b = append(b, 1) - } else { - b = append(b, 0) - } - return b, nil -} -func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toBoolSlice() - for _, v := range s { - b = appendVarint(b, wiretag) - if v { - b = append(b, 1) - } else { - b = append(b, 0) - } - } - return b, nil -} -func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toBoolSlice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(len(s))) - for _, v := range s { - if v { - b = append(b, 1) - } else { - b = append(b, 0) - } - } - return b, nil -} -func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toString() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toString() - if v == "" { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toStringPtr() - if p == nil { - return b, nil - } - v := *p - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toStringSlice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - } - return b, nil -} -func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - v := *ptr.toString() - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - v := *ptr.toString() - if v == "" { - return b, nil - } - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - p := *ptr.toStringPtr() - if p == nil { - return b, nil - } - v := *p - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - s := *ptr.toStringSlice() - for _, v := range s { - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - } - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBytes() - if v == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) 
- return b, nil -} -func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBytes() - if len(v) == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBytes() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toBytesSlice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - } - return b, nil -} - -// makeGroupMarshaler returns the sizer and marshaler for a group. -// u is the marshal info of the underlying message. -func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - p := ptr.getPointer() - if p.isNil() { - return 0 - } - return u.size(p) + 2*tagsize - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - p := ptr.getPointer() - if p.isNil() { - return b, nil - } - var err error - b = appendVarint(b, wiretag) // start group - b, err = u.marshal(b, p, deterministic) - b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group - return b, err - } -} - -// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice. -// u is the marshal info of the underlying message. -func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getPointerSlice() - n := 0 - for _, v := range s { - if v.isNil() { - continue - } - n += u.size(v) + 2*tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getPointerSlice() - var err error - var nerr nonFatal - for _, v := range s { - if v.isNil() { - return b, errRepeatedHasNil - } - b = appendVarint(b, wiretag) // start group - b, err = u.marshal(b, v, deterministic) - b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group - if !nerr.Merge(err) { - if err == ErrNil { - err = errRepeatedHasNil - } - return b, err - } - } - return b, nerr.E - } -} - -// makeMessageMarshaler returns the sizer and marshaler for a message field. -// u is the marshal info of the message. -func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - p := ptr.getPointer() - if p.isNil() { - return 0 - } - siz := u.size(p) - return siz + SizeVarint(uint64(siz)) + tagsize - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - p := ptr.getPointer() - if p.isNil() { - return b, nil - } - b = appendVarint(b, wiretag) - siz := u.cachedsize(p) - b = appendVarint(b, uint64(siz)) - return u.marshal(b, p, deterministic) - } -} - -// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice. -// u is the marshal info of the message. 
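
Strings, bytes and embedded messages above all share the length-delimited layout (key, payload length as a varint, payload); only groups differ, bracketing the payload with start-group/end-group keys instead of a length. A standalone sketch of the common case (helper names are illustrative):

package main

import "fmt"

const wireBytes = 2

func appendVarint(b []byte, v uint64) []byte {
	for v >= 1<<7 {
		b = append(b, byte(v&0x7f|0x80))
		v >>= 7
	}
	return append(b, byte(v))
}

// appendLengthDelimited shows the layout shared by string, bytes and embedded
// message fields: field key, payload length as a varint, then the payload.
func appendLengthDelimited(b []byte, fieldNum int, payload []byte) []byte {
	b = appendVarint(b, uint64(fieldNum)<<3|wireBytes)
	b = appendVarint(b, uint64(len(payload)))
	return append(b, payload...)
}

func main() {
	fmt.Printf("% x\n", appendLengthDelimited(nil, 2, []byte("hi"))) // 12 02 68 69
}
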
-func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getPointerSlice() - n := 0 - for _, v := range s { - if v.isNil() { - continue - } - siz := u.size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getPointerSlice() - var err error - var nerr nonFatal - for _, v := range s { - if v.isNil() { - return b, errRepeatedHasNil - } - b = appendVarint(b, wiretag) - siz := u.cachedsize(v) - b = appendVarint(b, uint64(siz)) - b, err = u.marshal(b, v, deterministic) - - if !nerr.Merge(err) { - if err == ErrNil { - err = errRepeatedHasNil - } - return b, err - } - } - return b, nerr.E - } -} - -// makeMapMarshaler returns the sizer and marshaler for a map field. -// f is the pointer to the reflect data structure of the field. -func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { - // figure out key and value type - t := f.Type - keyType := t.Key() - valType := t.Elem() - keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",") - valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") - keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map - valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map - keyWireTag := 1<<3 | wiretype(keyTags[0]) - valWireTag := 2<<3 | wiretype(valTags[0]) - - // We create an interface to get the addresses of the map key and value. - // If value is pointer-typed, the interface is a direct interface, the - // idata itself is the value. Otherwise, the idata is the pointer to the - // value. - // Key cannot be pointer-typed. - valIsPtr := valType.Kind() == reflect.Ptr - - // If value is a message with nested maps, calling - // valSizer in marshal may be quadratic. We should use - // cached version in marshal (but not in size). - // If value is not message type, we don't have size cache, - // but it cannot be nested either. Just use valSizer. - valCachedSizer := valSizer - if valIsPtr && valType.Elem().Kind() == reflect.Struct { - u := getMarshalInfo(valType.Elem()) - valCachedSizer = func(ptr pointer, tagsize int) int { - // Same as message sizer, but use cache. 
- p := ptr.getPointer() - if p.isNil() { - return 0 - } - siz := u.cachedsize(p) - return siz + SizeVarint(uint64(siz)) + tagsize - } - } - return func(ptr pointer, tagsize int) int { - m := ptr.asPointerTo(t).Elem() // the map - n := 0 - for _, k := range m.MapKeys() { - ki := k.Interface() - vi := m.MapIndex(k).Interface() - kaddr := toAddrPointer(&ki, false, false) // pointer to key - vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value - siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) { - m := ptr.asPointerTo(t).Elem() // the map - var err error - keys := m.MapKeys() - if len(keys) > 1 && deterministic { - sort.Sort(mapKeys(keys)) - } - - var nerr nonFatal - for _, k := range keys { - ki := k.Interface() - vi := m.MapIndex(k).Interface() - kaddr := toAddrPointer(&ki, false, false) // pointer to key - vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value - b = appendVarint(b, tag) - siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) - b = appendVarint(b, uint64(siz)) - b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic) - if !nerr.Merge(err) { - return b, err - } - b, err = valMarshaler(b, vaddr, valWireTag, deterministic) - if err != ErrNil && !nerr.Merge(err) { // allow nil value in map - return b, err - } - } - return b, nerr.E - } -} - -// makeOneOfMarshaler returns the sizer and marshaler for a oneof field. -// fi is the marshal info of the field. -// f is the pointer to the reflect data structure of the field. -func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) { - // Oneof field is an interface. We need to get the actual data type on the fly. - t := f.Type - return func(ptr pointer, _ int) int { - p := ptr.getInterfacePointer() - if p.isNil() { - return 0 - } - v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct - telem := v.Type() - e := fi.oneofElems[telem] - return e.sizer(p, e.tagsize) - }, - func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) { - p := ptr.getInterfacePointer() - if p.isNil() { - return b, nil - } - v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct - telem := v.Type() - if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() { - return b, errOneofHasNil - } - e := fi.oneofElems[telem] - return e.marshaler(b, p, e.wiretag, deterministic) - } -} - -// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field. -func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int { - m, mu := ext.extensionsRead() - if m == nil { - return 0 - } - mu.Lock() - - n := 0 - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - n += len(e.enc) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - n += ei.sizer(p, ei.tagsize) - } - mu.Unlock() - return n -} - -// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b. 
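
makeMapMarshaler above encodes every map entry as a tiny embedded message whose key sits at field 1 and whose value sits at field 2. A standalone sketch for a map<uint32, string> field (names are illustrative, not from the library):

package main

import "fmt"

const (
	wireVarint = 0
	wireBytes  = 2
)

func appendVarint(b []byte, v uint64) []byte {
	for v >= 1<<7 {
		b = append(b, byte(v&0x7f|0x80))
		v >>= 7
	}
	return append(b, byte(v))
}

// appendMapEntry mirrors the layout built above: each entry is a small
// embedded message (key = field 1, value = field 2), wrapped in the
// length-delimited map field itself.
func appendMapEntry(b []byte, fieldNum int, k uint32, v string) []byte {
	var entry []byte
	entry = appendVarint(entry, 1<<3|wireVarint) // key, field 1
	entry = appendVarint(entry, uint64(k))
	entry = appendVarint(entry, 2<<3|wireBytes) // value, field 2
	entry = appendVarint(entry, uint64(len(v)))
	entry = append(entry, v...)

	b = appendVarint(b, uint64(fieldNum)<<3|wireBytes) // the map field itself
	b = appendVarint(b, uint64(len(entry)))
	return append(b, entry...)
}

func main() {
	fmt.Printf("% x\n", appendMapEntry(nil, 3, 7, "ok"))
	// 1a 06 08 07 12 02 6f 6b
}
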
-func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { - m, mu := ext.extensionsRead() - if m == nil { - return b, nil - } - mu.Lock() - defer mu.Unlock() - - var err error - var nerr nonFatal - - // Fast-path for common cases: zero or one extensions. - // Don't bother sorting the keys. - if len(m) <= 1 { - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - b = append(b, e.enc...) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E - } - - // Sort the keys to provide a deterministic encoding. - // Not sure this is required, but the old code does it. - keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - for _, k := range keys { - e := m[int32(k)] - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - b = append(b, e.enc...) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E -} - -// message set format is: -// message MessageSet { -// repeated group Item = 1 { -// required int32 type_id = 2; -// required string message = 3; -// }; -// } - -// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field -// in message set format (above). -func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int { - m, mu := ext.extensionsRead() - if m == nil { - return 0 - } - mu.Lock() - - n := 0 - for id, e := range m { - n += 2 // start group, end group. tag = 1 (size=1) - n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1) - - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint - siz := len(msgWithLen) - n += siz + 1 // message, tag = 3 (size=1) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - n += ei.sizer(p, 1) // message, tag = 3 (size=1) - } - mu.Unlock() - return n -} - -// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above) -// to the end of byte slice b. -func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { - m, mu := ext.extensionsRead() - if m == nil { - return b, nil - } - mu.Lock() - defer mu.Unlock() - - var err error - var nerr nonFatal - - // Fast-path for common cases: zero or one extensions. - // Don't bother sorting the keys. 
- if len(m) <= 1 { - for id, e := range m { - b = append(b, 1<<3|WireStartGroup) - b = append(b, 2<<3|WireVarint) - b = appendVarint(b, uint64(id)) - - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint - b = append(b, 3<<3|WireBytes) - b = append(b, msgWithLen...) - b = append(b, 1<<3|WireEndGroup) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) - if !nerr.Merge(err) { - return b, err - } - b = append(b, 1<<3|WireEndGroup) - } - return b, nerr.E - } - - // Sort the keys to provide a deterministic encoding. - keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - for _, id := range keys { - e := m[int32(id)] - b = append(b, 1<<3|WireStartGroup) - b = append(b, 2<<3|WireVarint) - b = appendVarint(b, uint64(id)) - - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint - b = append(b, 3<<3|WireBytes) - b = append(b, msgWithLen...) - b = append(b, 1<<3|WireEndGroup) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) - b = append(b, 1<<3|WireEndGroup) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E -} - -// sizeV1Extensions computes the size of encoded data for a V1-API extension field. -func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int { - if m == nil { - return 0 - } - - n := 0 - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - n += len(e.enc) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - n += ei.sizer(p, ei.tagsize) - } - return n -} - -// appendV1Extensions marshals a V1-API extension field to the end of byte slice b. -func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) { - if m == nil { - return b, nil - } - - // Sort the keys to provide a deterministic encoding. - keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - var err error - var nerr nonFatal - for _, k := range keys { - e := m[int32(k)] - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - b = append(b, e.enc...) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. 
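
appendMessageSet above writes the legacy MessageSet framing spelled out in the comment: a group at field 1 wrapping a type_id (field 2) and the already-encoded extension message (field 3). A standalone sketch (names are illustrative):

package main

import "fmt"

const (
	wireVarint     = 0
	wireBytes      = 2
	wireStartGroup = 3
	wireEndGroup   = 4
)

func appendVarint(b []byte, v uint64) []byte {
	for v >= 1<<7 {
		b = append(b, byte(v&0x7f|0x80))
		v >>= 7
	}
	return append(b, byte(v))
}

// appendMessageSetItem writes one MessageSet item: a group at field 1 holding
// the type_id (field 2, varint) and the encoded message (field 3, bytes).
func appendMessageSetItem(b []byte, typeID int32, msg []byte) []byte {
	b = append(b, 1<<3|wireStartGroup)
	b = append(b, 2<<3|wireVarint)
	b = appendVarint(b, uint64(typeID))
	b = append(b, 3<<3|wireBytes)
	b = appendVarint(b, uint64(len(msg)))
	b = append(b, msg...)
	return append(b, 1<<3|wireEndGroup)
}

func main() {
	fmt.Printf("% x\n", appendMessageSetItem(nil, 100, []byte{0x08, 0x01}))
	// 0b 10 64 1a 02 08 01 0c
}
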
- - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E -} - -// newMarshaler is the interface representing objects that can marshal themselves. -// -// This exists to support protoc-gen-go generated messages. -// The proto package will stop type-asserting to this interface in the future. -// -// DO NOT DEPEND ON THIS. -type newMarshaler interface { - XXX_Size() int - XXX_Marshal(b []byte, deterministic bool) ([]byte, error) -} - -// Size returns the encoded size of a protocol buffer message. -// This is the main entry point. -func Size(pb Message) int { - if m, ok := pb.(newMarshaler); ok { - return m.XXX_Size() - } - if m, ok := pb.(Marshaler); ok { - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - b, _ := m.Marshal() - return len(b) - } - // in case somehow we didn't generate the wrapper - if pb == nil { - return 0 - } - var info InternalMessageInfo - return info.Size(pb) -} - -// Marshal takes a protocol buffer message -// and encodes it into the wire format, returning the data. -// This is the main entry point. -func Marshal(pb Message) ([]byte, error) { - if m, ok := pb.(newMarshaler); ok { - siz := m.XXX_Size() - b := make([]byte, 0, siz) - return m.XXX_Marshal(b, false) - } - if m, ok := pb.(Marshaler); ok { - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - return m.Marshal() - } - // in case somehow we didn't generate the wrapper - if pb == nil { - return nil, ErrNil - } - var info InternalMessageInfo - siz := info.Size(pb) - b := make([]byte, 0, siz) - return info.Marshal(b, pb, false) -} - -// Marshal takes a protocol buffer message -// and encodes it into the wire format, writing the result to the -// Buffer. -// This is an alternative entry point. It is not necessary to use -// a Buffer for most applications. -func (p *Buffer) Marshal(pb Message) error { - var err error - if m, ok := pb.(newMarshaler); ok { - siz := m.XXX_Size() - p.grow(siz) // make sure buf has enough capacity - p.buf, err = m.XXX_Marshal(p.buf, p.deterministic) - return err - } - if m, ok := pb.(Marshaler); ok { - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - b, err := m.Marshal() - p.buf = append(p.buf, b...) - return err - } - // in case somehow we didn't generate the wrapper - if pb == nil { - return ErrNil - } - var info InternalMessageInfo - siz := info.Size(pb) - p.grow(siz) // make sure buf has enough capacity - p.buf, err = info.Marshal(p.buf, pb, p.deterministic) - return err -} - -// grow grows the buffer's capacity, if necessary, to guarantee space for -// another n bytes. After grow(n), at least n bytes can be written to the -// buffer without another allocation. -func (p *Buffer) grow(n int) { - need := len(p.buf) + n - if need <= cap(p.buf) { - return - } - newCap := len(p.buf) * 2 - if newCap < need { - newCap = need - } - p.buf = append(make([]byte, 0, newCap), p.buf...) -} diff --git a/vendor/github.com/golang/protobuf/proto/table_merge.go b/vendor/github.com/golang/protobuf/proto/table_merge.go deleted file mode 100644 index 5525def6a..000000000 --- a/vendor/github.com/golang/protobuf/proto/table_merge.go +++ /dev/null @@ -1,654 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. 
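
(*Buffer).grow above amortizes allocations by at least doubling the buffer whenever Marshal needs more room. A standalone sketch of the same strategy:

package main

import "fmt"

// grow mirrors the strategy of (*Buffer).grow above: when the buffer cannot
// hold another n bytes, double its length, or jump straight to the needed
// size if doubling is not enough.
func grow(buf []byte, n int) []byte {
	need := len(buf) + n
	if need <= cap(buf) {
		return buf
	}
	newCap := len(buf) * 2
	if newCap < need {
		newCap = need
	}
	return append(make([]byte, 0, newCap), buf...)
}

func main() {
	b := make([]byte, 0, 4)
	caps := []int{}
	for i := 0; i < 40; i++ {
		b = grow(b, 1)
		if len(caps) == 0 || caps[len(caps)-1] != cap(b) {
			caps = append(caps, cap(b))
		}
		b = append(b, byte(i))
	}
	fmt.Println(len(b), caps) // 40 [4 8 16 32 64]
}
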
All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "fmt" - "reflect" - "strings" - "sync" - "sync/atomic" -) - -// Merge merges the src message into dst. -// This assumes that dst and src of the same type and are non-nil. -func (a *InternalMessageInfo) Merge(dst, src Message) { - mi := atomicLoadMergeInfo(&a.merge) - if mi == nil { - mi = getMergeInfo(reflect.TypeOf(dst).Elem()) - atomicStoreMergeInfo(&a.merge, mi) - } - mi.merge(toPointer(&dst), toPointer(&src)) -} - -type mergeInfo struct { - typ reflect.Type - - initialized int32 // 0: only typ is valid, 1: everything is valid - lock sync.Mutex - - fields []mergeFieldInfo - unrecognized field // Offset of XXX_unrecognized -} - -type mergeFieldInfo struct { - field field // Offset of field, guaranteed to be valid - - // isPointer reports whether the value in the field is a pointer. - // This is true for the following situations: - // * Pointer to struct - // * Pointer to basic type (proto2 only) - // * Slice (first value in slice header is a pointer) - // * String (first value in string header is a pointer) - isPointer bool - - // basicWidth reports the width of the field assuming that it is directly - // embedded in the struct (as is the case for basic types in proto3). - // The possible values are: - // 0: invalid - // 1: bool - // 4: int32, uint32, float32 - // 8: int64, uint64, float64 - basicWidth int - - // Where dst and src are pointers to the types being merged. - merge func(dst, src pointer) -} - -var ( - mergeInfoMap = map[reflect.Type]*mergeInfo{} - mergeInfoLock sync.Mutex -) - -func getMergeInfo(t reflect.Type) *mergeInfo { - mergeInfoLock.Lock() - defer mergeInfoLock.Unlock() - mi := mergeInfoMap[t] - if mi == nil { - mi = &mergeInfo{typ: t} - mergeInfoMap[t] = mi - } - return mi -} - -// merge merges src into dst assuming they are both of type *mi.typ. 
-func (mi *mergeInfo) merge(dst, src pointer) { - if dst.isNil() { - panic("proto: nil destination") - } - if src.isNil() { - return // Nothing to do. - } - - if atomic.LoadInt32(&mi.initialized) == 0 { - mi.computeMergeInfo() - } - - for _, fi := range mi.fields { - sfp := src.offset(fi.field) - - // As an optimization, we can avoid the merge function call cost - // if we know for sure that the source will have no effect - // by checking if it is the zero value. - if unsafeAllowed { - if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string - continue - } - if fi.basicWidth > 0 { - switch { - case fi.basicWidth == 1 && !*sfp.toBool(): - continue - case fi.basicWidth == 4 && *sfp.toUint32() == 0: - continue - case fi.basicWidth == 8 && *sfp.toUint64() == 0: - continue - } - } - } - - dfp := dst.offset(fi.field) - fi.merge(dfp, sfp) - } - - // TODO: Make this faster? - out := dst.asPointerTo(mi.typ).Elem() - in := src.asPointerTo(mi.typ).Elem() - if emIn, err := extendable(in.Addr().Interface()); err == nil { - emOut, _ := extendable(out.Addr().Interface()) - mIn, muIn := emIn.extensionsRead() - if mIn != nil { - mOut := emOut.extensionsWrite() - muIn.Lock() - mergeExtension(mOut, mIn) - muIn.Unlock() - } - } - - if mi.unrecognized.IsValid() { - if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 { - *dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...) - } - } -} - -func (mi *mergeInfo) computeMergeInfo() { - mi.lock.Lock() - defer mi.lock.Unlock() - if mi.initialized != 0 { - return - } - t := mi.typ - n := t.NumField() - - props := GetProperties(t) - for i := 0; i < n; i++ { - f := t.Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - - mfi := mergeFieldInfo{field: toField(&f)} - tf := f.Type - - // As an optimization, we can avoid the merge function call cost - // if we know for sure that the source will have no effect - // by checking if it is the zero value. - if unsafeAllowed { - switch tf.Kind() { - case reflect.Ptr, reflect.Slice, reflect.String: - // As a special case, we assume slices and strings are pointers - // since we know that the first field in the SliceSlice or - // StringHeader is a data pointer. - mfi.isPointer = true - case reflect.Bool: - mfi.basicWidth = 1 - case reflect.Int32, reflect.Uint32, reflect.Float32: - mfi.basicWidth = 4 - case reflect.Int64, reflect.Uint64, reflect.Float64: - mfi.basicWidth = 8 - } - } - - // Unwrap tf to get at its most basic type. - var isPointer, isSlice bool - if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { - isSlice = true - tf = tf.Elem() - } - if tf.Kind() == reflect.Ptr { - isPointer = true - tf = tf.Elem() - } - if isPointer && isSlice && tf.Kind() != reflect.Struct { - panic("both pointer and slice for basic type in " + tf.Name()) - } - - switch tf.Kind() { - case reflect.Int32: - switch { - case isSlice: // E.g., []int32 - mfi.merge = func(dst, src pointer) { - // NOTE: toInt32Slice is not defined (see pointer_reflect.go). - /* - sfsp := src.toInt32Slice() - if *sfsp != nil { - dfsp := dst.toInt32Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []int64{} - } - } - */ - sfs := src.getInt32Slice() - if sfs != nil { - dfs := dst.getInt32Slice() - dfs = append(dfs, sfs...) - if dfs == nil { - dfs = []int32{} - } - dst.setInt32Slice(dfs) - } - } - case isPointer: // E.g., *int32 - mfi.merge = func(dst, src pointer) { - // NOTE: toInt32Ptr is not defined (see pointer_reflect.go). 
- /* - sfpp := src.toInt32Ptr() - if *sfpp != nil { - dfpp := dst.toInt32Ptr() - if *dfpp == nil { - *dfpp = Int32(**sfpp) - } else { - **dfpp = **sfpp - } - } - */ - sfp := src.getInt32Ptr() - if sfp != nil { - dfp := dst.getInt32Ptr() - if dfp == nil { - dst.setInt32Ptr(*sfp) - } else { - *dfp = *sfp - } - } - } - default: // E.g., int32 - mfi.merge = func(dst, src pointer) { - if v := *src.toInt32(); v != 0 { - *dst.toInt32() = v - } - } - } - case reflect.Int64: - switch { - case isSlice: // E.g., []int64 - mfi.merge = func(dst, src pointer) { - sfsp := src.toInt64Slice() - if *sfsp != nil { - dfsp := dst.toInt64Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []int64{} - } - } - } - case isPointer: // E.g., *int64 - mfi.merge = func(dst, src pointer) { - sfpp := src.toInt64Ptr() - if *sfpp != nil { - dfpp := dst.toInt64Ptr() - if *dfpp == nil { - *dfpp = Int64(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., int64 - mfi.merge = func(dst, src pointer) { - if v := *src.toInt64(); v != 0 { - *dst.toInt64() = v - } - } - } - case reflect.Uint32: - switch { - case isSlice: // E.g., []uint32 - mfi.merge = func(dst, src pointer) { - sfsp := src.toUint32Slice() - if *sfsp != nil { - dfsp := dst.toUint32Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []uint32{} - } - } - } - case isPointer: // E.g., *uint32 - mfi.merge = func(dst, src pointer) { - sfpp := src.toUint32Ptr() - if *sfpp != nil { - dfpp := dst.toUint32Ptr() - if *dfpp == nil { - *dfpp = Uint32(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., uint32 - mfi.merge = func(dst, src pointer) { - if v := *src.toUint32(); v != 0 { - *dst.toUint32() = v - } - } - } - case reflect.Uint64: - switch { - case isSlice: // E.g., []uint64 - mfi.merge = func(dst, src pointer) { - sfsp := src.toUint64Slice() - if *sfsp != nil { - dfsp := dst.toUint64Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []uint64{} - } - } - } - case isPointer: // E.g., *uint64 - mfi.merge = func(dst, src pointer) { - sfpp := src.toUint64Ptr() - if *sfpp != nil { - dfpp := dst.toUint64Ptr() - if *dfpp == nil { - *dfpp = Uint64(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., uint64 - mfi.merge = func(dst, src pointer) { - if v := *src.toUint64(); v != 0 { - *dst.toUint64() = v - } - } - } - case reflect.Float32: - switch { - case isSlice: // E.g., []float32 - mfi.merge = func(dst, src pointer) { - sfsp := src.toFloat32Slice() - if *sfsp != nil { - dfsp := dst.toFloat32Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []float32{} - } - } - } - case isPointer: // E.g., *float32 - mfi.merge = func(dst, src pointer) { - sfpp := src.toFloat32Ptr() - if *sfpp != nil { - dfpp := dst.toFloat32Ptr() - if *dfpp == nil { - *dfpp = Float32(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., float32 - mfi.merge = func(dst, src pointer) { - if v := *src.toFloat32(); v != 0 { - *dst.toFloat32() = v - } - } - } - case reflect.Float64: - switch { - case isSlice: // E.g., []float64 - mfi.merge = func(dst, src pointer) { - sfsp := src.toFloat64Slice() - if *sfsp != nil { - dfsp := dst.toFloat64Slice() - *dfsp = append(*dfsp, *sfsp...) 
- if *dfsp == nil { - *dfsp = []float64{} - } - } - } - case isPointer: // E.g., *float64 - mfi.merge = func(dst, src pointer) { - sfpp := src.toFloat64Ptr() - if *sfpp != nil { - dfpp := dst.toFloat64Ptr() - if *dfpp == nil { - *dfpp = Float64(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., float64 - mfi.merge = func(dst, src pointer) { - if v := *src.toFloat64(); v != 0 { - *dst.toFloat64() = v - } - } - } - case reflect.Bool: - switch { - case isSlice: // E.g., []bool - mfi.merge = func(dst, src pointer) { - sfsp := src.toBoolSlice() - if *sfsp != nil { - dfsp := dst.toBoolSlice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []bool{} - } - } - } - case isPointer: // E.g., *bool - mfi.merge = func(dst, src pointer) { - sfpp := src.toBoolPtr() - if *sfpp != nil { - dfpp := dst.toBoolPtr() - if *dfpp == nil { - *dfpp = Bool(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., bool - mfi.merge = func(dst, src pointer) { - if v := *src.toBool(); v { - *dst.toBool() = v - } - } - } - case reflect.String: - switch { - case isSlice: // E.g., []string - mfi.merge = func(dst, src pointer) { - sfsp := src.toStringSlice() - if *sfsp != nil { - dfsp := dst.toStringSlice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []string{} - } - } - } - case isPointer: // E.g., *string - mfi.merge = func(dst, src pointer) { - sfpp := src.toStringPtr() - if *sfpp != nil { - dfpp := dst.toStringPtr() - if *dfpp == nil { - *dfpp = String(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., string - mfi.merge = func(dst, src pointer) { - if v := *src.toString(); v != "" { - *dst.toString() = v - } - } - } - case reflect.Slice: - isProto3 := props.Prop[i].proto3 - switch { - case isPointer: - panic("bad pointer in byte slice case in " + tf.Name()) - case tf.Elem().Kind() != reflect.Uint8: - panic("bad element kind in byte slice case in " + tf.Name()) - case isSlice: // E.g., [][]byte - mfi.merge = func(dst, src pointer) { - sbsp := src.toBytesSlice() - if *sbsp != nil { - dbsp := dst.toBytesSlice() - for _, sb := range *sbsp { - if sb == nil { - *dbsp = append(*dbsp, nil) - } else { - *dbsp = append(*dbsp, append([]byte{}, sb...)) - } - } - if *dbsp == nil { - *dbsp = [][]byte{} - } - } - } - default: // E.g., []byte - mfi.merge = func(dst, src pointer) { - sbp := src.toBytes() - if *sbp != nil { - dbp := dst.toBytes() - if !isProto3 || len(*sbp) > 0 { - *dbp = append([]byte{}, *sbp...) 
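
The per-kind merge functions built by computeMergeInfo follow simple rules: non-zero scalars overwrite, repeated fields append, byte slices are deep-copied, and messages merge recursively. A standalone sketch of those rules on a plain struct (Cfg is a hypothetical stand-in for a generated message):

package main

import "fmt"

// Cfg stands in for a generated message; only the merge rules matter here.
type Cfg struct {
	Replicas int32
	Labels   []string
	Data     []byte
}

func merge(dst, src *Cfg) {
	if src.Replicas != 0 { // non-zero scalar overwrites
		dst.Replicas = src.Replicas
	}
	if src.Labels != nil { // repeated field appends
		dst.Labels = append(dst.Labels, src.Labels...)
	}
	if len(src.Data) > 0 { // []byte is deep-copied, as in the byte-slice case
		dst.Data = append([]byte(nil), src.Data...)
	}
}

func main() {
	dst := &Cfg{Replicas: 1, Labels: []string{"a"}}
	merge(dst, &Cfg{Replicas: 3, Labels: []string{"b"}, Data: []byte{0xff}})
	fmt.Println(dst.Replicas, dst.Labels, dst.Data) // 3 [a b] [255]
}
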
- } - } - } - } - case reflect.Struct: - switch { - case !isPointer: - panic(fmt.Sprintf("message field %s without pointer", tf)) - case isSlice: // E.g., []*pb.T - mi := getMergeInfo(tf) - mfi.merge = func(dst, src pointer) { - sps := src.getPointerSlice() - if sps != nil { - dps := dst.getPointerSlice() - for _, sp := range sps { - var dp pointer - if !sp.isNil() { - dp = valToPointer(reflect.New(tf)) - mi.merge(dp, sp) - } - dps = append(dps, dp) - } - if dps == nil { - dps = []pointer{} - } - dst.setPointerSlice(dps) - } - } - default: // E.g., *pb.T - mi := getMergeInfo(tf) - mfi.merge = func(dst, src pointer) { - sp := src.getPointer() - if !sp.isNil() { - dp := dst.getPointer() - if dp.isNil() { - dp = valToPointer(reflect.New(tf)) - dst.setPointer(dp) - } - mi.merge(dp, sp) - } - } - } - case reflect.Map: - switch { - case isPointer || isSlice: - panic("bad pointer or slice in map case in " + tf.Name()) - default: // E.g., map[K]V - mfi.merge = func(dst, src pointer) { - sm := src.asPointerTo(tf).Elem() - if sm.Len() == 0 { - return - } - dm := dst.asPointerTo(tf).Elem() - if dm.IsNil() { - dm.Set(reflect.MakeMap(tf)) - } - - switch tf.Elem().Kind() { - case reflect.Ptr: // Proto struct (e.g., *T) - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - val = reflect.ValueOf(Clone(val.Interface().(Message))) - dm.SetMapIndex(key, val) - } - case reflect.Slice: // E.g. Bytes type (e.g., []byte) - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) - dm.SetMapIndex(key, val) - } - default: // Basic type (e.g., string) - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - dm.SetMapIndex(key, val) - } - } - } - } - case reflect.Interface: - // Must be oneof field. - switch { - case isPointer || isSlice: - panic("bad pointer or slice in interface case in " + tf.Name()) - default: // E.g., interface{} - // TODO: Make this faster? - mfi.merge = func(dst, src pointer) { - su := src.asPointerTo(tf).Elem() - if !su.IsNil() { - du := dst.asPointerTo(tf).Elem() - typ := su.Elem().Type() - if du.IsNil() || du.Elem().Type() != typ { - du.Set(reflect.New(typ.Elem())) // Initialize interface if empty - } - sv := su.Elem().Elem().Field(0) - if sv.Kind() == reflect.Ptr && sv.IsNil() { - return - } - dv := du.Elem().Elem().Field(0) - if dv.Kind() == reflect.Ptr && dv.IsNil() { - dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty - } - switch sv.Type().Kind() { - case reflect.Ptr: // Proto struct (e.g., *T) - Merge(dv.Interface().(Message), sv.Interface().(Message)) - case reflect.Slice: // E.g. 
Bytes type (e.g., []byte) - dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...))) - default: // Basic type (e.g., string) - dv.Set(sv) - } - } - } - } - default: - panic(fmt.Sprintf("merger not found for type:%s", tf)) - } - mi.fields = append(mi.fields, mfi) - } - - mi.unrecognized = invalidField - if f, ok := t.FieldByName("XXX_unrecognized"); ok { - if f.Type != reflect.TypeOf([]byte{}) { - panic("expected XXX_unrecognized to be of type []byte") - } - mi.unrecognized = toField(&f) - } - - atomic.StoreInt32(&mi.initialized, 1) -} diff --git a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go deleted file mode 100644 index acee2fc52..000000000 --- a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go +++ /dev/null @@ -1,2053 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "errors" - "fmt" - "io" - "math" - "reflect" - "strconv" - "strings" - "sync" - "sync/atomic" - "unicode/utf8" -) - -// Unmarshal is the entry point from the generated .pb.go files. -// This function is not intended to be used by non-generated code. -// This function is not subject to any compatibility guarantee. -// msg contains a pointer to a protocol buffer struct. -// b is the data to be unmarshaled into the protocol buffer. -// a is a pointer to a place to store cached unmarshal information. -func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error { - // Load the unmarshal information for this message type. - // The atomic load ensures memory consistency. - u := atomicLoadUnmarshalInfo(&a.unmarshal) - if u == nil { - // Slow path: find unmarshal info for msg, update a with it. - u = getUnmarshalInfo(reflect.TypeOf(msg).Elem()) - atomicStoreUnmarshalInfo(&a.unmarshal, u) - } - // Then do the unmarshaling. 
- err := u.unmarshal(toPointer(&msg), b) - return err -} - -type unmarshalInfo struct { - typ reflect.Type // type of the protobuf struct - - // 0 = only typ field is initialized - // 1 = completely initialized - initialized int32 - lock sync.Mutex // prevents double initialization - dense []unmarshalFieldInfo // fields indexed by tag # - sparse map[uint64]unmarshalFieldInfo // fields indexed by tag # - reqFields []string // names of required fields - reqMask uint64 // 1< 0 { - // Read tag and wire type. - // Special case 1 and 2 byte varints. - var x uint64 - if b[0] < 128 { - x = uint64(b[0]) - b = b[1:] - } else if len(b) >= 2 && b[1] < 128 { - x = uint64(b[0]&0x7f) + uint64(b[1])<<7 - b = b[2:] - } else { - var n int - x, n = decodeVarint(b) - if n == 0 { - return io.ErrUnexpectedEOF - } - b = b[n:] - } - tag := x >> 3 - wire := int(x) & 7 - - // Dispatch on the tag to one of the unmarshal* functions below. - var f unmarshalFieldInfo - if tag < uint64(len(u.dense)) { - f = u.dense[tag] - } else { - f = u.sparse[tag] - } - if fn := f.unmarshal; fn != nil { - var err error - b, err = fn(b, m.offset(f.field), wire) - if err == nil { - reqMask |= f.reqMask - continue - } - if r, ok := err.(*RequiredNotSetError); ok { - // Remember this error, but keep parsing. We need to produce - // a full parse even if a required field is missing. - if errLater == nil { - errLater = r - } - reqMask |= f.reqMask - continue - } - if err != errInternalBadWireType { - if err == errInvalidUTF8 { - if errLater == nil { - fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name - errLater = &invalidUTF8Error{fullName} - } - continue - } - return err - } - // Fragments with bad wire type are treated as unknown fields. - } - - // Unknown tag. - if !u.unrecognized.IsValid() { - // Don't keep unrecognized data; just skip it. - var err error - b, err = skipField(b, wire) - if err != nil { - return err - } - continue - } - // Keep unrecognized data around. - // maybe in extensions, maybe in the unrecognized field. - z := m.offset(u.unrecognized).toBytes() - var emap map[int32]Extension - var e Extension - for _, r := range u.extensionRanges { - if uint64(r.Start) <= tag && tag <= uint64(r.End) { - if u.extensions.IsValid() { - mp := m.offset(u.extensions).toExtensions() - emap = mp.extensionsWrite() - e = emap[int32(tag)] - z = &e.enc - break - } - if u.oldExtensions.IsValid() { - p := m.offset(u.oldExtensions).toOldExtensions() - emap = *p - if emap == nil { - emap = map[int32]Extension{} - *p = emap - } - e = emap[int32(tag)] - z = &e.enc - break - } - panic("no extensions field available") - } - } - - // Use wire type to skip data. - var err error - b0 := b - b, err = skipField(b, wire) - if err != nil { - return err - } - *z = encodeVarint(*z, tag<<3|uint64(wire)) - *z = append(*z, b0[:len(b0)-len(b)]...) - - if emap != nil { - emap[int32(tag)] = e - } - } - if reqMask != u.reqMask && errLater == nil { - // A required field of this message is missing. - for _, n := range u.reqFields { - if reqMask&1 == 0 { - errLater = &RequiredNotSetError{n} - } - reqMask >>= 1 - } - } - return errLater -} - -// computeUnmarshalInfo fills in u with information for use -// in unmarshaling protocol buffers of type u.typ. -func (u *unmarshalInfo) computeUnmarshalInfo() { - u.lock.Lock() - defer u.lock.Unlock() - if u.initialized != 0 { - return - } - t := u.typ - n := t.NumField() - - // Set up the "not found" value for the unrecognized byte buffer. - // This is the default for proto3. 
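
The unmarshal loop above reads one varint key per field, splits it into the field tag (x >> 3) and wire type (x & 7), and dispatches through the dense/sparse field tables. A standalone sketch of the key decoding:

package main

import "fmt"

// decodeVarint reads a base-128 varint and reports how many bytes it
// consumed (0 on a truncated input).
func decodeVarint(b []byte) (uint64, int) {
	var v uint64
	for i := 0; i < len(b) && i < 10; i++ {
		v |= uint64(b[i]&0x7f) << uint(7*i)
		if b[i] < 0x80 {
			return v, i + 1
		}
	}
	return 0, 0
}

func main() {
	// Field key 0x1a = field 3, wire type 2 (length-delimited), split exactly
	// as in the loop above: tag := x >> 3, wire := x & 7.
	b := []byte{0x1a, 0x02, 0x68, 0x69}
	x, n := decodeVarint(b)
	tag, wire := x>>3, x&7
	fmt.Println(tag, wire, string(b[n+1:n+1+int(b[n])])) // 3 2 hi
}
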
- u.unrecognized = invalidField - u.extensions = invalidField - u.oldExtensions = invalidField - - // List of the generated type and offset for each oneof field. - type oneofField struct { - ityp reflect.Type // interface type of oneof field - field field // offset in containing message - } - var oneofFields []oneofField - - for i := 0; i < n; i++ { - f := t.Field(i) - if f.Name == "XXX_unrecognized" { - // The byte slice used to hold unrecognized input is special. - if f.Type != reflect.TypeOf(([]byte)(nil)) { - panic("bad type for XXX_unrecognized field: " + f.Type.Name()) - } - u.unrecognized = toField(&f) - continue - } - if f.Name == "XXX_InternalExtensions" { - // Ditto here. - if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) { - panic("bad type for XXX_InternalExtensions field: " + f.Type.Name()) - } - u.extensions = toField(&f) - if f.Tag.Get("protobuf_messageset") == "1" { - u.isMessageSet = true - } - continue - } - if f.Name == "XXX_extensions" { - // An older form of the extensions field. - if f.Type != reflect.TypeOf((map[int32]Extension)(nil)) { - panic("bad type for XXX_extensions field: " + f.Type.Name()) - } - u.oldExtensions = toField(&f) - continue - } - if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" { - continue - } - - oneof := f.Tag.Get("protobuf_oneof") - if oneof != "" { - oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)}) - // The rest of oneof processing happens below. - continue - } - - tags := f.Tag.Get("protobuf") - tagArray := strings.Split(tags, ",") - if len(tagArray) < 2 { - panic("protobuf tag not enough fields in " + t.Name() + "." + f.Name + ": " + tags) - } - tag, err := strconv.Atoi(tagArray[1]) - if err != nil { - panic("protobuf tag field not an integer: " + tagArray[1]) - } - - name := "" - for _, tag := range tagArray[3:] { - if strings.HasPrefix(tag, "name=") { - name = tag[5:] - } - } - - // Extract unmarshaling function from the field (its type and tags). - unmarshal := fieldUnmarshaler(&f) - - // Required field? - var reqMask uint64 - if tagArray[2] == "req" { - bit := len(u.reqFields) - u.reqFields = append(u.reqFields, name) - reqMask = uint64(1) << uint(bit) - // TODO: if we have more than 64 required fields, we end up - // not verifying that all required fields are present. - // Fix this, perhaps using a count of required fields? - } - - // Store the info in the correct slot in the message. - u.setTag(tag, toField(&f), unmarshal, reqMask, name) - } - - // Find any types associated with oneof fields. - var oneofImplementers []interface{} - switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { - case oneofFuncsIface: - _, _, _, oneofImplementers = m.XXX_OneofFuncs() - case oneofWrappersIface: - oneofImplementers = m.XXX_OneofWrappers() - } - for _, v := range oneofImplementers { - tptr := reflect.TypeOf(v) // *Msg_X - typ := tptr.Elem() // Msg_X - - f := typ.Field(0) // oneof implementers have one field - baseUnmarshal := fieldUnmarshaler(&f) - tags := strings.Split(f.Tag.Get("protobuf"), ",") - fieldNum, err := strconv.Atoi(tags[1]) - if err != nil { - panic("protobuf tag field not an integer: " + tags[1]) - } - var name string - for _, tag := range tags { - if strings.HasPrefix(tag, "name=") { - name = strings.TrimPrefix(tag, "name=") - break - } - } - - // Find the oneof field that this struct implements. - // Might take O(n^2) to process all of the oneofs, but who cares. 
- for _, of := range oneofFields { - if tptr.Implements(of.ityp) { - // We have found the corresponding interface for this struct. - // That lets us know where this struct should be stored - // when we encounter it during unmarshaling. - unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal) - u.setTag(fieldNum, of.field, unmarshal, 0, name) - } - } - - } - - // Get extension ranges, if any. - fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray") - if fn.IsValid() { - if !u.extensions.IsValid() && !u.oldExtensions.IsValid() { - panic("a message with extensions, but no extensions field in " + t.Name()) - } - u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange) - } - - // Explicitly disallow tag 0. This will ensure we flag an error - // when decoding a buffer of all zeros. Without this code, we - // would decode and skip an all-zero buffer of even length. - // [0 0] is [tag=0/wiretype=varint varint-encoded-0]. - u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) { - return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w) - }, 0, "") - - // Set mask for required field check. - u.reqMask = uint64(1)<= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here? - for len(u.dense) <= tag { - u.dense = append(u.dense, unmarshalFieldInfo{}) - } - u.dense[tag] = i - return - } - if u.sparse == nil { - u.sparse = map[uint64]unmarshalFieldInfo{} - } - u.sparse[uint64(tag)] = i -} - -// fieldUnmarshaler returns an unmarshaler for the given field. -func fieldUnmarshaler(f *reflect.StructField) unmarshaler { - if f.Type.Kind() == reflect.Map { - return makeUnmarshalMap(f) - } - return typeUnmarshaler(f.Type, f.Tag.Get("protobuf")) -} - -// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair. -func typeUnmarshaler(t reflect.Type, tags string) unmarshaler { - tagArray := strings.Split(tags, ",") - encoding := tagArray[0] - name := "unknown" - proto3 := false - validateUTF8 := true - for _, tag := range tagArray[3:] { - if strings.HasPrefix(tag, "name=") { - name = tag[5:] - } - if tag == "proto3" { - proto3 = true - } - } - validateUTF8 = validateUTF8 && proto3 - - // Figure out packaging (pointer, slice, or both) - slice := false - pointer := false - if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { - slice = true - t = t.Elem() - } - if t.Kind() == reflect.Ptr { - pointer = true - t = t.Elem() - } - - // We'll never have both pointer and slice for basic types. 
- if pointer && slice && t.Kind() != reflect.Struct { - panic("both pointer and slice for basic type in " + t.Name()) - } - - switch t.Kind() { - case reflect.Bool: - if pointer { - return unmarshalBoolPtr - } - if slice { - return unmarshalBoolSlice - } - return unmarshalBoolValue - case reflect.Int32: - switch encoding { - case "fixed32": - if pointer { - return unmarshalFixedS32Ptr - } - if slice { - return unmarshalFixedS32Slice - } - return unmarshalFixedS32Value - case "varint": - // this could be int32 or enum - if pointer { - return unmarshalInt32Ptr - } - if slice { - return unmarshalInt32Slice - } - return unmarshalInt32Value - case "zigzag32": - if pointer { - return unmarshalSint32Ptr - } - if slice { - return unmarshalSint32Slice - } - return unmarshalSint32Value - } - case reflect.Int64: - switch encoding { - case "fixed64": - if pointer { - return unmarshalFixedS64Ptr - } - if slice { - return unmarshalFixedS64Slice - } - return unmarshalFixedS64Value - case "varint": - if pointer { - return unmarshalInt64Ptr - } - if slice { - return unmarshalInt64Slice - } - return unmarshalInt64Value - case "zigzag64": - if pointer { - return unmarshalSint64Ptr - } - if slice { - return unmarshalSint64Slice - } - return unmarshalSint64Value - } - case reflect.Uint32: - switch encoding { - case "fixed32": - if pointer { - return unmarshalFixed32Ptr - } - if slice { - return unmarshalFixed32Slice - } - return unmarshalFixed32Value - case "varint": - if pointer { - return unmarshalUint32Ptr - } - if slice { - return unmarshalUint32Slice - } - return unmarshalUint32Value - } - case reflect.Uint64: - switch encoding { - case "fixed64": - if pointer { - return unmarshalFixed64Ptr - } - if slice { - return unmarshalFixed64Slice - } - return unmarshalFixed64Value - case "varint": - if pointer { - return unmarshalUint64Ptr - } - if slice { - return unmarshalUint64Slice - } - return unmarshalUint64Value - } - case reflect.Float32: - if pointer { - return unmarshalFloat32Ptr - } - if slice { - return unmarshalFloat32Slice - } - return unmarshalFloat32Value - case reflect.Float64: - if pointer { - return unmarshalFloat64Ptr - } - if slice { - return unmarshalFloat64Slice - } - return unmarshalFloat64Value - case reflect.Map: - panic("map type in typeUnmarshaler in " + t.Name()) - case reflect.Slice: - if pointer { - panic("bad pointer in slice case in " + t.Name()) - } - if slice { - return unmarshalBytesSlice - } - return unmarshalBytesValue - case reflect.String: - if validateUTF8 { - if pointer { - return unmarshalUTF8StringPtr - } - if slice { - return unmarshalUTF8StringSlice - } - return unmarshalUTF8StringValue - } - if pointer { - return unmarshalStringPtr - } - if slice { - return unmarshalStringSlice - } - return unmarshalStringValue - case reflect.Struct: - // message or group field - if !pointer { - panic(fmt.Sprintf("message/group field %s:%s without pointer", t, encoding)) - } - switch encoding { - case "bytes": - if slice { - return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name) - } - return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name) - case "group": - if slice { - return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name) - } - return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name) - } - } - panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding)) -} - -// Below are all the unmarshalers for individual fields of various types. 
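(Not part of the vendored file or of this patch.) The sint32/sint64 unmarshalers that follow all use the same two wire-format tricks this file keeps referring to: varint decoding and zigzag decoding (`int64(x>>1) ^ int64(x)<<63>>63`). A minimal, self-contained sketch of those transforms, checked against the standard library's varint helpers — every name in it is illustrative only:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// zigzagDecode64 mirrors the transform used by the sint64 unmarshalers:
// the low bit carries the sign, the remaining bits carry the magnitude.
func zigzagDecode64(x uint64) int64 {
	return int64(x>>1) ^ int64(x)<<63>>63
}

// zigzagEncode64 is the inverse transform used when writing sint64 fields.
func zigzagEncode64(v int64) uint64 {
	return uint64(v<<1) ^ uint64(v>>63)
}

func main() {
	for _, v := range []int64{0, -1, 1, -2, 150, -150} {
		// A sint64 field is written as the zigzag-encoded value in varint form.
		buf := make([]byte, binary.MaxVarintLen64)
		n := binary.PutUvarint(buf, zigzagEncode64(v))

		// Decoding reverses both steps: varint first, then zigzag.
		x, _ := binary.Uvarint(buf[:n])
		fmt.Printf("%4d -> % x -> %d\n", v, buf[:n], zigzagDecode64(x))
	}
}
```

Running it prints each signed value, its wire bytes, and the round-tripped result — the same work `unmarshalSint64Value` does one field at a time, just without the table machinery.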
- -func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - *f.toInt64() = v - return b, nil -} - -func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - *f.toInt64Ptr() = &v - return b, nil -} - -func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - s := f.toInt64Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - s := f.toInt64Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - *f.toInt64() = v - return b, nil -} - -func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - *f.toInt64Ptr() = &v - return b, nil -} - -func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - s := f.toInt64Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - s := f.toInt64Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - *f.toUint64() = v - return b, nil -} - -func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - *f.toUint64Ptr() = &v - return b, nil -} - -func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 
{ - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - s := f.toUint64Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - s := f.toUint64Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - *f.toInt32() = v - return b, nil -} - -func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - f.setInt32Ptr(v) - return b, nil -} - -func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - f.appendInt32Slice(v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - f.appendInt32Slice(v) - return b, nil -} - -func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - *f.toInt32() = v - return b, nil -} - -func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - f.setInt32Ptr(v) - return b, nil -} - -func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - f.appendInt32Slice(v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - f.appendInt32Slice(v) - return b, nil -} - -func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - *f.toUint32() = v - return b, nil -} - -func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - *f.toUint32Ptr() = &v - return b, nil -} - -func unmarshalUint32Slice(b []byte, f pointer, w int) 
([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - s := f.toUint32Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - s := f.toUint32Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - *f.toUint64() = v - return b[8:], nil -} - -func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - *f.toUint64Ptr() = &v - return b[8:], nil -} - -func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - s := f.toUint64Slice() - *s = append(*s, v) - b = b[8:] - } - return res, nil - } - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - s := f.toUint64Slice() - *s = append(*s, v) - return b[8:], nil -} - -func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - *f.toInt64() = v - return b[8:], nil -} - -func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - *f.toInt64Ptr() = &v - return b[8:], nil -} - -func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | 
int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - s := f.toInt64Slice() - *s = append(*s, v) - b = b[8:] - } - return res, nil - } - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - s := f.toInt64Slice() - *s = append(*s, v) - return b[8:], nil -} - -func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - *f.toUint32() = v - return b[4:], nil -} - -func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - *f.toUint32Ptr() = &v - return b[4:], nil -} - -func unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - s := f.toUint32Slice() - *s = append(*s, v) - b = b[4:] - } - return res, nil - } - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - s := f.toUint32Slice() - *s = append(*s, v) - return b[4:], nil -} - -func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - *f.toInt32() = v - return b[4:], nil -} - -func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - f.setInt32Ptr(v) - return b[4:], nil -} - -func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - f.appendInt32Slice(v) - b = b[4:] - } - return res, nil - } - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - f.appendInt32Slice(v) - return b[4:], nil -} - -func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - // Note: any length varint is allowed, even though any sane - // encoder will use one byte. 
- // See https://github.com/golang/protobuf/issues/76 - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - // TODO: check if x>1? Tests seem to indicate no. - v := x != 0 - *f.toBool() = v - return b[n:], nil -} - -func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - v := x != 0 - *f.toBoolPtr() = &v - return b[n:], nil -} - -func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - v := x != 0 - s := f.toBoolSlice() - *s = append(*s, v) - b = b[n:] - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - v := x != 0 - s := f.toBoolSlice() - *s = append(*s, v) - return b[n:], nil -} - -func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - *f.toFloat64() = v - return b[8:], nil -} - -func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - *f.toFloat64Ptr() = &v - return b[8:], nil -} - -func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - s := f.toFloat64Slice() - *s = append(*s, v) - b = b[8:] - } - return res, nil - } - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - s := f.toFloat64Slice() - *s = append(*s, v) - return b[8:], nil -} - -func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - *f.toFloat32() = v - return b[4:], nil -} - -func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - 
v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - *f.toFloat32Ptr() = &v - return b[4:], nil -} - -func unmarshalFloat32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - s := f.toFloat32Slice() - *s = append(*s, v) - b = b[4:] - } - return res, nil - } - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - s := f.toFloat32Slice() - *s = append(*s, v) - return b[4:], nil -} - -func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toString() = v - return b[x:], nil -} - -func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toStringPtr() = &v - return b[x:], nil -} - -func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - s := f.toStringSlice() - *s = append(*s, v) - return b[x:], nil -} - -func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toString() = v - if !utf8.ValidString(v) { - return b[x:], errInvalidUTF8 - } - return b[x:], nil -} - -func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toStringPtr() = &v - if !utf8.ValidString(v) { - return b[x:], errInvalidUTF8 - } - return b[x:], nil -} - -func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - s := f.toStringSlice() - *s = append(*s, v) - if !utf8.ValidString(v) { - return b[x:], errInvalidUTF8 - } - return b[x:], nil -} - -var emptyBuf [0]byte - -func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 
0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - // The use of append here is a trick which avoids the zeroing - // that would be required if we used a make/copy pair. - // We append to emptyBuf instead of nil because we want - // a non-nil result even when the length is 0. - v := append(emptyBuf[:], b[:x]...) - *f.toBytes() = v - return b[x:], nil -} - -func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := append(emptyBuf[:], b[:x]...) - s := f.toBytesSlice() - *s = append(*s, v) - return b[x:], nil -} - -func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - // First read the message field to see if something is there. - // The semantics of multiple submessages are weird. Instead of - // the last one winning (as it is for all other fields), multiple - // submessages are merged. - v := f.getPointer() - if v.isNil() { - v = valToPointer(reflect.New(sub.typ)) - f.setPointer(v) - } - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - return b[x:], err - } -} - -func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := valToPointer(reflect.New(sub.typ)) - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - f.appendPointer(v) - return b[x:], err - } -} - -func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireStartGroup { - return b, errInternalBadWireType - } - x, y := findEndGroup(b) - if x < 0 { - return nil, io.ErrUnexpectedEOF - } - v := f.getPointer() - if v.isNil() { - v = valToPointer(reflect.New(sub.typ)) - f.setPointer(v) - } - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - return b[y:], err - } -} - -func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireStartGroup { - return b, errInternalBadWireType - } - x, y := findEndGroup(b) - if x < 0 { - return nil, io.ErrUnexpectedEOF - } - v := valToPointer(reflect.New(sub.typ)) - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." 
+ r.field - } else { - return nil, err - } - } - f.appendPointer(v) - return b[y:], err - } -} - -func makeUnmarshalMap(f *reflect.StructField) unmarshaler { - t := f.Type - kt := t.Key() - vt := t.Elem() - unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key")) - unmarshalVal := typeUnmarshaler(vt, f.Tag.Get("protobuf_val")) - return func(b []byte, f pointer, w int) ([]byte, error) { - // The map entry is a submessage. Figure out how big it is. - if w != WireBytes { - return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes) - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - r := b[x:] // unused data to return - b = b[:x] // data for map entry - - // Note: we could use #keys * #values ~= 200 functions - // to do map decoding without reflection. Probably not worth it. - // Maps will be somewhat slow. Oh well. - - // Read key and value from data. - var nerr nonFatal - k := reflect.New(kt) - v := reflect.New(vt) - for len(b) > 0 { - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - wire := int(x) & 7 - b = b[n:] - - var err error - switch x >> 3 { - case 1: - b, err = unmarshalKey(b, valToPointer(k), wire) - case 2: - b, err = unmarshalVal(b, valToPointer(v), wire) - default: - err = errInternalBadWireType // skip unknown tag - } - - if nerr.Merge(err) { - continue - } - if err != errInternalBadWireType { - return nil, err - } - - // Skip past unknown fields. - b, err = skipField(b, wire) - if err != nil { - return nil, err - } - } - - // Get map, allocate if needed. - m := f.asPointerTo(t).Elem() // an addressable map[K]T - if m.IsNil() { - m.Set(reflect.MakeMap(t)) - } - - // Insert into map. - m.SetMapIndex(k.Elem(), v.Elem()) - - return r, nerr.E - } -} - -// makeUnmarshalOneof makes an unmarshaler for oneof fields. -// for: -// message Msg { -// oneof F { -// int64 X = 1; -// float64 Y = 2; -// } -// } -// typ is the type of the concrete entry for a oneof case (e.g. Msg_X). -// ityp is the interface type of the oneof field (e.g. isMsg_F). -// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64). -// Note that this function will be called once for each case in the oneof. -func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler { - sf := typ.Field(0) - field0 := toField(&sf) - return func(b []byte, f pointer, w int) ([]byte, error) { - // Allocate holder for value. - v := reflect.New(typ) - - // Unmarshal data into holder. - // We unmarshal into the first field of the holder object. - var err error - var nerr nonFatal - b, err = unmarshal(b, valToPointer(v).offset(field0), w) - if !nerr.Merge(err) { - return nil, err - } - - // Write pointer to holder into target field. - f.asPointerTo(ityp).Elem().Set(v) - - return b, nerr.E - } -} - -// Error used by decode internally. -var errInternalBadWireType = errors.New("proto: internal error: bad wiretype") - -// skipField skips past a field of type wire and returns the remaining bytes. 
-func skipField(b []byte, wire int) ([]byte, error) { - switch wire { - case WireVarint: - _, k := decodeVarint(b) - if k == 0 { - return b, io.ErrUnexpectedEOF - } - b = b[k:] - case WireFixed32: - if len(b) < 4 { - return b, io.ErrUnexpectedEOF - } - b = b[4:] - case WireFixed64: - if len(b) < 8 { - return b, io.ErrUnexpectedEOF - } - b = b[8:] - case WireBytes: - m, k := decodeVarint(b) - if k == 0 || uint64(len(b)-k) < m { - return b, io.ErrUnexpectedEOF - } - b = b[uint64(k)+m:] - case WireStartGroup: - _, i := findEndGroup(b) - if i == -1 { - return b, io.ErrUnexpectedEOF - } - b = b[i:] - default: - return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire) - } - return b, nil -} - -// findEndGroup finds the index of the next EndGroup tag. -// Groups may be nested, so the "next" EndGroup tag is the first -// unpaired EndGroup. -// findEndGroup returns the indexes of the start and end of the EndGroup tag. -// Returns (-1,-1) if it can't find one. -func findEndGroup(b []byte) (int, int) { - depth := 1 - i := 0 - for { - x, n := decodeVarint(b[i:]) - if n == 0 { - return -1, -1 - } - j := i - i += n - switch x & 7 { - case WireVarint: - _, k := decodeVarint(b[i:]) - if k == 0 { - return -1, -1 - } - i += k - case WireFixed32: - if len(b)-4 < i { - return -1, -1 - } - i += 4 - case WireFixed64: - if len(b)-8 < i { - return -1, -1 - } - i += 8 - case WireBytes: - m, k := decodeVarint(b[i:]) - if k == 0 { - return -1, -1 - } - i += k - if uint64(len(b)-i) < m { - return -1, -1 - } - i += int(m) - case WireStartGroup: - depth++ - case WireEndGroup: - depth-- - if depth == 0 { - return j, i - } - default: - return -1, -1 - } - } -} - -// encodeVarint appends a varint-encoded integer to b and returns the result. -func encodeVarint(b []byte, x uint64) []byte { - for x >= 1<<7 { - b = append(b, byte(x&0x7f|0x80)) - x >>= 7 - } - return append(b, byte(x)) -} - -// decodeVarint reads a varint-encoded integer from b. -// Returns the decoded integer and the number of bytes read. -// If there is an error, it returns 0,0. 
-func decodeVarint(b []byte) (uint64, int) { - var x, y uint64 - if len(b) == 0 { - goto bad - } - x = uint64(b[0]) - if x < 0x80 { - return x, 1 - } - x -= 0x80 - - if len(b) <= 1 { - goto bad - } - y = uint64(b[1]) - x += y << 7 - if y < 0x80 { - return x, 2 - } - x -= 0x80 << 7 - - if len(b) <= 2 { - goto bad - } - y = uint64(b[2]) - x += y << 14 - if y < 0x80 { - return x, 3 - } - x -= 0x80 << 14 - - if len(b) <= 3 { - goto bad - } - y = uint64(b[3]) - x += y << 21 - if y < 0x80 { - return x, 4 - } - x -= 0x80 << 21 - - if len(b) <= 4 { - goto bad - } - y = uint64(b[4]) - x += y << 28 - if y < 0x80 { - return x, 5 - } - x -= 0x80 << 28 - - if len(b) <= 5 { - goto bad - } - y = uint64(b[5]) - x += y << 35 - if y < 0x80 { - return x, 6 - } - x -= 0x80 << 35 - - if len(b) <= 6 { - goto bad - } - y = uint64(b[6]) - x += y << 42 - if y < 0x80 { - return x, 7 - } - x -= 0x80 << 42 - - if len(b) <= 7 { - goto bad - } - y = uint64(b[7]) - x += y << 49 - if y < 0x80 { - return x, 8 - } - x -= 0x80 << 49 - - if len(b) <= 8 { - goto bad - } - y = uint64(b[8]) - x += y << 56 - if y < 0x80 { - return x, 9 - } - x -= 0x80 << 56 - - if len(b) <= 9 { - goto bad - } - y = uint64(b[9]) - x += y << 63 - if y < 2 { - return x, 10 - } - -bad: - return 0, 0 -} diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go deleted file mode 100644 index 1aaee725b..000000000 --- a/vendor/github.com/golang/protobuf/proto/text.go +++ /dev/null @@ -1,843 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// Functions for writing the text protocol buffer format. 
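(Not part of the vendored file or of this patch.) For orientation, a hedged standalone sketch of the string-escaping convention these text-format writers follow — named escapes for a handful of control characters, three-digit octal escapes for other non-printable bytes, iterating over bytes rather than runes, as `writeString` further down does. The helper name is made up for illustration:

```go
package main

import "fmt"

// quoteTextProto mimics, outside the vendored code, the text-format quoting
// rules: \n, \r, \t, quote and backslash get named escapes; other
// non-printable bytes get octal escapes; printable ASCII passes through.
func quoteTextProto(s string) string {
	out := []byte{'"'}
	for i := 0; i < len(s); i++ { // loop over bytes, not runes
		switch c := s[i]; c {
		case '\n':
			out = append(out, `\n`...)
		case '\r':
			out = append(out, `\r`...)
		case '\t':
			out = append(out, `\t`...)
		case '"':
			out = append(out, `\"`...)
		case '\\':
			out = append(out, `\\`...)
		default:
			if c >= 0x20 && c < 0x7f { // printable ASCII, written as-is
				out = append(out, c)
			} else {
				out = append(out, []byte(fmt.Sprintf("\\%03o", c))...)
			}
		}
	}
	return string(append(out, '"'))
}

func main() {
	// Non-ASCII bytes come out as octal escapes, matching the C++ text format.
	fmt.Println(quoteTextProto("héllo\tworld\n"))
}
```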
- -import ( - "bufio" - "bytes" - "encoding" - "errors" - "fmt" - "io" - "log" - "math" - "reflect" - "sort" - "strings" -) - -var ( - newline = []byte("\n") - spaces = []byte(" ") - endBraceNewline = []byte("}\n") - backslashN = []byte{'\\', 'n'} - backslashR = []byte{'\\', 'r'} - backslashT = []byte{'\\', 't'} - backslashDQ = []byte{'\\', '"'} - backslashBS = []byte{'\\', '\\'} - posInf = []byte("inf") - negInf = []byte("-inf") - nan = []byte("nan") -) - -type writer interface { - io.Writer - WriteByte(byte) error -} - -// textWriter is an io.Writer that tracks its indentation level. -type textWriter struct { - ind int - complete bool // if the current position is a complete line - compact bool // whether to write out as a one-liner - w writer -} - -func (w *textWriter) WriteString(s string) (n int, err error) { - if !strings.Contains(s, "\n") { - if !w.compact && w.complete { - w.writeIndent() - } - w.complete = false - return io.WriteString(w.w, s) - } - // WriteString is typically called without newlines, so this - // codepath and its copy are rare. We copy to avoid - // duplicating all of Write's logic here. - return w.Write([]byte(s)) -} - -func (w *textWriter) Write(p []byte) (n int, err error) { - newlines := bytes.Count(p, newline) - if newlines == 0 { - if !w.compact && w.complete { - w.writeIndent() - } - n, err = w.w.Write(p) - w.complete = false - return n, err - } - - frags := bytes.SplitN(p, newline, newlines+1) - if w.compact { - for i, frag := range frags { - if i > 0 { - if err := w.w.WriteByte(' '); err != nil { - return n, err - } - n++ - } - nn, err := w.w.Write(frag) - n += nn - if err != nil { - return n, err - } - } - return n, nil - } - - for i, frag := range frags { - if w.complete { - w.writeIndent() - } - nn, err := w.w.Write(frag) - n += nn - if err != nil { - return n, err - } - if i+1 < len(frags) { - if err := w.w.WriteByte('\n'); err != nil { - return n, err - } - n++ - } - } - w.complete = len(frags[len(frags)-1]) == 0 - return n, nil -} - -func (w *textWriter) WriteByte(c byte) error { - if w.compact && c == '\n' { - c = ' ' - } - if !w.compact && w.complete { - w.writeIndent() - } - err := w.w.WriteByte(c) - w.complete = c == '\n' - return err -} - -func (w *textWriter) indent() { w.ind++ } - -func (w *textWriter) unindent() { - if w.ind == 0 { - log.Print("proto: textWriter unindented too far") - return - } - w.ind-- -} - -func writeName(w *textWriter, props *Properties) error { - if _, err := w.WriteString(props.OrigName); err != nil { - return err - } - if props.Wire != "group" { - return w.WriteByte(':') - } - return nil -} - -func requiresQuotes(u string) bool { - // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. - for _, ch := range u { - switch { - case ch == '.' || ch == '/' || ch == '_': - continue - case '0' <= ch && ch <= '9': - continue - case 'A' <= ch && ch <= 'Z': - continue - case 'a' <= ch && ch <= 'z': - continue - default: - return true - } - } - return false -} - -// isAny reports whether sv is a google.protobuf.Any message -func isAny(sv reflect.Value) bool { - type wkt interface { - XXX_WellKnownType() string - } - t, ok := sv.Addr().Interface().(wkt) - return ok && t.XXX_WellKnownType() == "Any" -} - -// writeProto3Any writes an expanded google.protobuf.Any message. -// -// It returns (false, nil) if sv value can't be unmarshaled (e.g. because -// required messages are not linked in). -// -// It returns (true, error) when sv was written in expanded format or an error -// was encountered. 
-func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) { - turl := sv.FieldByName("TypeUrl") - val := sv.FieldByName("Value") - if !turl.IsValid() || !val.IsValid() { - return true, errors.New("proto: invalid google.protobuf.Any message") - } - - b, ok := val.Interface().([]byte) - if !ok { - return true, errors.New("proto: invalid google.protobuf.Any message") - } - - parts := strings.Split(turl.String(), "/") - mt := MessageType(parts[len(parts)-1]) - if mt == nil { - return false, nil - } - m := reflect.New(mt.Elem()) - if err := Unmarshal(b, m.Interface().(Message)); err != nil { - return false, nil - } - w.Write([]byte("[")) - u := turl.String() - if requiresQuotes(u) { - writeString(w, u) - } else { - w.Write([]byte(u)) - } - if w.compact { - w.Write([]byte("]:<")) - } else { - w.Write([]byte("]: <\n")) - w.ind++ - } - if err := tm.writeStruct(w, m.Elem()); err != nil { - return true, err - } - if w.compact { - w.Write([]byte("> ")) - } else { - w.ind-- - w.Write([]byte(">\n")) - } - return true, nil -} - -func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { - if tm.ExpandAny && isAny(sv) { - if canExpand, err := tm.writeProto3Any(w, sv); canExpand { - return err - } - } - st := sv.Type() - sprops := GetProperties(st) - for i := 0; i < sv.NumField(); i++ { - fv := sv.Field(i) - props := sprops.Prop[i] - name := st.Field(i).Name - - if name == "XXX_NoUnkeyedLiteral" { - continue - } - - if strings.HasPrefix(name, "XXX_") { - // There are two XXX_ fields: - // XXX_unrecognized []byte - // XXX_extensions map[int32]proto.Extension - // The first is handled here; - // the second is handled at the bottom of this function. - if name == "XXX_unrecognized" && !fv.IsNil() { - if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { - return err - } - } - continue - } - if fv.Kind() == reflect.Ptr && fv.IsNil() { - // Field not filled in. This could be an optional field or - // a required field that wasn't filled in. Either way, there - // isn't anything we can show for it. - continue - } - if fv.Kind() == reflect.Slice && fv.IsNil() { - // Repeated field that is empty, or a bytes field that is unused. - continue - } - - if props.Repeated && fv.Kind() == reflect.Slice { - // Repeated field. - for j := 0; j < fv.Len(); j++ { - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - v := fv.Index(j) - if v.Kind() == reflect.Ptr && v.IsNil() { - // A nil message in a repeated field is not valid, - // but we can handle that more gracefully than panicking. - if _, err := w.Write([]byte("\n")); err != nil { - return err - } - continue - } - if err := tm.writeAny(w, v, props); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - continue - } - if fv.Kind() == reflect.Map { - // Map fields are rendered as a repeated struct with key/value fields. 
- keys := fv.MapKeys() - sort.Sort(mapKeys(keys)) - for _, key := range keys { - val := fv.MapIndex(key) - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - // open struct - if err := w.WriteByte('<'); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - // key - if _, err := w.WriteString("key:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, key, props.MapKeyProp); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - // nil values aren't legal, but we can avoid panicking because of them. - if val.Kind() != reflect.Ptr || !val.IsNil() { - // value - if _, err := w.WriteString("value:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, val, props.MapValProp); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - // close struct - w.unindent() - if err := w.WriteByte('>'); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - continue - } - if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { - // empty bytes field - continue - } - if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { - // proto3 non-repeated scalar field; skip if zero value - if isProto3Zero(fv) { - continue - } - } - - if fv.Kind() == reflect.Interface { - // Check if it is a oneof. - if st.Field(i).Tag.Get("protobuf_oneof") != "" { - // fv is nil, or holds a pointer to generated struct. - // That generated struct has exactly one field, - // which has a protobuf struct tag. - if fv.IsNil() { - continue - } - inner := fv.Elem().Elem() // interface -> *T -> T - tag := inner.Type().Field(0).Tag.Get("protobuf") - props = new(Properties) // Overwrite the outer props var, but not its pointee. - props.Parse(tag) - // Write the value in the oneof, not the oneof itself. - fv = inner.Field(0) - - // Special case to cope with malformed messages gracefully: - // If the value in the oneof is a nil pointer, don't panic - // in writeAny. - if fv.Kind() == reflect.Ptr && fv.IsNil() { - // Use errors.New so writeAny won't render quotes. - msg := errors.New("/* nil */") - fv = reflect.ValueOf(&msg).Elem() - } - } - } - - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - - // Enums have a String method, so writeAny will work fine. - if err := tm.writeAny(w, fv, props); err != nil { - return err - } - - if err := w.WriteByte('\n'); err != nil { - return err - } - } - - // Extensions (the XXX_extensions field). - pv := sv.Addr() - if _, err := extendable(pv.Interface()); err == nil { - if err := tm.writeExtensions(w, pv); err != nil { - return err - } - } - - return nil -} - -// writeAny writes an arbitrary field. -func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { - v = reflect.Indirect(v) - - // Floats have special cases. 
- if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { - x := v.Float() - var b []byte - switch { - case math.IsInf(x, 1): - b = posInf - case math.IsInf(x, -1): - b = negInf - case math.IsNaN(x): - b = nan - } - if b != nil { - _, err := w.Write(b) - return err - } - // Other values are handled below. - } - - // We don't attempt to serialise every possible value type; only those - // that can occur in protocol buffers. - switch v.Kind() { - case reflect.Slice: - // Should only be a []byte; repeated fields are handled in writeStruct. - if err := writeString(w, string(v.Bytes())); err != nil { - return err - } - case reflect.String: - if err := writeString(w, v.String()); err != nil { - return err - } - case reflect.Struct: - // Required/optional group/message. - var bra, ket byte = '<', '>' - if props != nil && props.Wire == "group" { - bra, ket = '{', '}' - } - if err := w.WriteByte(bra); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - if v.CanAddr() { - // Calling v.Interface on a struct causes the reflect package to - // copy the entire struct. This is racy with the new Marshaler - // since we atomically update the XXX_sizecache. - // - // Thus, we retrieve a pointer to the struct if possible to avoid - // a race since v.Interface on the pointer doesn't copy the struct. - // - // If v is not addressable, then we are not worried about a race - // since it implies that the binary Marshaler cannot possibly be - // mutating this value. - v = v.Addr() - } - if etm, ok := v.Interface().(encoding.TextMarshaler); ok { - text, err := etm.MarshalText() - if err != nil { - return err - } - if _, err = w.Write(text); err != nil { - return err - } - } else { - if v.Kind() == reflect.Ptr { - v = v.Elem() - } - if err := tm.writeStruct(w, v); err != nil { - return err - } - } - w.unindent() - if err := w.WriteByte(ket); err != nil { - return err - } - default: - _, err := fmt.Fprint(w, v.Interface()) - return err - } - return nil -} - -// equivalent to C's isprint. -func isprint(c byte) bool { - return c >= 0x20 && c < 0x7f -} - -// writeString writes a string in the protocol buffer text format. -// It is similar to strconv.Quote except we don't use Go escape sequences, -// we treat the string as a byte sequence, and we use octal escapes. -// These differences are to maintain interoperability with the other -// languages' implementations of the text format. -func writeString(w *textWriter, s string) error { - // use WriteByte here to get any needed indent - if err := w.WriteByte('"'); err != nil { - return err - } - // Loop over the bytes, not the runes. - for i := 0; i < len(s); i++ { - var err error - // Divergence from C++: we don't escape apostrophes. - // There's no need to escape them, and the C++ parser - // copes with a naked apostrophe. 
- switch c := s[i]; c { - case '\n': - _, err = w.w.Write(backslashN) - case '\r': - _, err = w.w.Write(backslashR) - case '\t': - _, err = w.w.Write(backslashT) - case '"': - _, err = w.w.Write(backslashDQ) - case '\\': - _, err = w.w.Write(backslashBS) - default: - if isprint(c) { - err = w.w.WriteByte(c) - } else { - _, err = fmt.Fprintf(w.w, "\\%03o", c) - } - } - if err != nil { - return err - } - } - return w.WriteByte('"') -} - -func writeUnknownStruct(w *textWriter, data []byte) (err error) { - if !w.compact { - if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { - return err - } - } - b := NewBuffer(data) - for b.index < len(b.buf) { - x, err := b.DecodeVarint() - if err != nil { - _, err := fmt.Fprintf(w, "/* %v */\n", err) - return err - } - wire, tag := x&7, x>>3 - if wire == WireEndGroup { - w.unindent() - if _, err := w.Write(endBraceNewline); err != nil { - return err - } - continue - } - if _, err := fmt.Fprint(w, tag); err != nil { - return err - } - if wire != WireStartGroup { - if err := w.WriteByte(':'); err != nil { - return err - } - } - if !w.compact || wire == WireStartGroup { - if err := w.WriteByte(' '); err != nil { - return err - } - } - switch wire { - case WireBytes: - buf, e := b.DecodeRawBytes(false) - if e == nil { - _, err = fmt.Fprintf(w, "%q", buf) - } else { - _, err = fmt.Fprintf(w, "/* %v */", e) - } - case WireFixed32: - x, err = b.DecodeFixed32() - err = writeUnknownInt(w, x, err) - case WireFixed64: - x, err = b.DecodeFixed64() - err = writeUnknownInt(w, x, err) - case WireStartGroup: - err = w.WriteByte('{') - w.indent() - case WireVarint: - x, err = b.DecodeVarint() - err = writeUnknownInt(w, x, err) - default: - _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) - } - if err != nil { - return err - } - if err = w.WriteByte('\n'); err != nil { - return err - } - } - return nil -} - -func writeUnknownInt(w *textWriter, x uint64, err error) error { - if err == nil { - _, err = fmt.Fprint(w, x) - } else { - _, err = fmt.Fprintf(w, "/* %v */", err) - } - return err -} - -type int32Slice []int32 - -func (s int32Slice) Len() int { return len(s) } -func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } -func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// writeExtensions writes all the extensions in pv. -// pv is assumed to be a pointer to a protocol message struct that is extendable. -func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error { - emap := extensionMaps[pv.Type().Elem()] - ep, _ := extendable(pv.Interface()) - - // Order the extensions by ID. - // This isn't strictly necessary, but it will give us - // canonical output, which will also make testing easier. - m, mu := ep.extensionsRead() - if m == nil { - return nil - } - mu.Lock() - ids := make([]int32, 0, len(m)) - for id := range m { - ids = append(ids, id) - } - sort.Sort(int32Slice(ids)) - mu.Unlock() - - for _, extNum := range ids { - ext := m[extNum] - var desc *ExtensionDesc - if emap != nil { - desc = emap[extNum] - } - if desc == nil { - // Unknown extension. - if err := writeUnknownStruct(w, ext.enc); err != nil { - return err - } - continue - } - - pb, err := GetExtension(ep, desc) - if err != nil { - return fmt.Errorf("failed getting extension: %v", err) - } - - // Repeated extensions will appear as a slice. 
- if !desc.repeated() { - if err := tm.writeExtension(w, desc.Name, pb); err != nil { - return err - } - } else { - v := reflect.ValueOf(pb) - for i := 0; i < v.Len(); i++ { - if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { - return err - } - } - } - } - return nil -} - -func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error { - if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - return nil -} - -func (w *textWriter) writeIndent() { - if !w.complete { - return - } - remain := w.ind * 2 - for remain > 0 { - n := remain - if n > len(spaces) { - n = len(spaces) - } - w.w.Write(spaces[:n]) - remain -= n - } - w.complete = false -} - -// TextMarshaler is a configurable text format marshaler. -type TextMarshaler struct { - Compact bool // use compact text format (one line). - ExpandAny bool // expand google.protobuf.Any messages of known types -} - -// Marshal writes a given protocol buffer in text format. -// The only errors returned are from w. -func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { - val := reflect.ValueOf(pb) - if pb == nil || val.IsNil() { - w.Write([]byte("")) - return nil - } - var bw *bufio.Writer - ww, ok := w.(writer) - if !ok { - bw = bufio.NewWriter(w) - ww = bw - } - aw := &textWriter{ - w: ww, - complete: true, - compact: tm.Compact, - } - - if etm, ok := pb.(encoding.TextMarshaler); ok { - text, err := etm.MarshalText() - if err != nil { - return err - } - if _, err = aw.Write(text); err != nil { - return err - } - if bw != nil { - return bw.Flush() - } - return nil - } - // Dereference the received pointer so we don't have outer < and >. - v := reflect.Indirect(val) - if err := tm.writeStruct(aw, v); err != nil { - return err - } - if bw != nil { - return bw.Flush() - } - return nil -} - -// Text is the same as Marshal, but returns the string directly. -func (tm *TextMarshaler) Text(pb Message) string { - var buf bytes.Buffer - tm.Marshal(&buf, pb) - return buf.String() -} - -var ( - defaultTextMarshaler = TextMarshaler{} - compactTextMarshaler = TextMarshaler{Compact: true} -) - -// TODO: consider removing some of the Marshal functions below. - -// MarshalText writes a given protocol buffer in text format. -// The only errors returned are from w. -func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) } - -// MarshalTextString is the same as MarshalText, but returns the string directly. -func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) } - -// CompactText writes a given protocol buffer in compact text format (one line). -func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) } - -// CompactTextString is the same as CompactText, but returns the string directly. -func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) } diff --git a/vendor/github.com/golang/protobuf/proto/text_decode.go b/vendor/github.com/golang/protobuf/proto/text_decode.go new file mode 100644 index 000000000..4a5931009 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/text_decode.go @@ -0,0 +1,801 @@ +// Copyright 2010 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "encoding" + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "unicode/utf8" + + "google.golang.org/protobuf/encoding/prototext" + protoV2 "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +const wrapTextUnmarshalV2 = false + +// ParseError is returned by UnmarshalText. +type ParseError struct { + Message string + + // Deprecated: Do not use. + Line, Offset int +} + +func (e *ParseError) Error() string { + if wrapTextUnmarshalV2 { + return e.Message + } + if e.Line == 1 { + return fmt.Sprintf("line 1.%d: %v", e.Offset, e.Message) + } + return fmt.Sprintf("line %d: %v", e.Line, e.Message) +} + +// UnmarshalText parses a proto text formatted string into m. +func UnmarshalText(s string, m Message) error { + if u, ok := m.(encoding.TextUnmarshaler); ok { + return u.UnmarshalText([]byte(s)) + } + + m.Reset() + mi := MessageV2(m) + + if wrapTextUnmarshalV2 { + err := prototext.UnmarshalOptions{ + AllowPartial: true, + }.Unmarshal([]byte(s), mi) + if err != nil { + return &ParseError{Message: err.Error()} + } + return checkRequiredNotSet(mi) + } else { + if err := newTextParser(s).unmarshalMessage(mi.ProtoReflect(), ""); err != nil { + return err + } + return checkRequiredNotSet(mi) + } +} + +type textParser struct { + s string // remaining input + done bool // whether the parsing is finished (success or error) + backed bool // whether back() was called + offset, line int + cur token +} + +type token struct { + value string + err *ParseError + line int // line number + offset int // byte number from start of input, not start of line + unquoted string // the unquoted version of value, if it was a quoted string +} + +func newTextParser(s string) *textParser { + p := new(textParser) + p.s = s + p.line = 1 + p.cur.line = 1 + return p +} + +func (p *textParser) unmarshalMessage(m protoreflect.Message, terminator string) (err error) { + md := m.Descriptor() + fds := md.Fields() + + // A struct is a sequence of "name: value", terminated by one of + // '>' or '}', or the end of the input. A name may also be + // "[extension]" or "[type/url]". + // + // The whole struct can also be an expanded Any message, like: + // [type/url] < ... struct contents ... > + seen := make(map[protoreflect.FieldNumber]bool) + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + if tok.value == "[" { + if err := p.unmarshalExtensionOrAny(m, seen); err != nil { + return err + } + continue + } + + // This is a normal, non-extension field. 
+ name := protoreflect.Name(tok.value) + fd := fds.ByName(name) + switch { + case fd == nil: + gd := fds.ByName(protoreflect.Name(strings.ToLower(string(name)))) + if gd != nil && gd.Kind() == protoreflect.GroupKind && gd.Message().Name() == name { + fd = gd + } + case fd.Kind() == protoreflect.GroupKind && fd.Message().Name() != name: + fd = nil + case fd.IsWeak() && fd.Message().IsPlaceholder(): + fd = nil + } + if fd == nil { + typeName := string(md.FullName()) + if m, ok := m.Interface().(Message); ok { + t := reflect.TypeOf(m) + if t.Kind() == reflect.Ptr { + typeName = t.Elem().String() + } + } + return p.errorf("unknown field name %q in %v", name, typeName) + } + if od := fd.ContainingOneof(); od != nil && m.WhichOneof(od) != nil { + return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, od.Name()) + } + if fd.Cardinality() != protoreflect.Repeated && seen[fd.Number()] { + return p.errorf("non-repeated field %q was repeated", fd.Name()) + } + seen[fd.Number()] = true + + // Consume any colon. + if err := p.checkForColon(fd); err != nil { + return err + } + + // Parse into the field. + v := m.Get(fd) + if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) { + v = m.Mutable(fd) + } + if v, err = p.unmarshalValue(v, fd); err != nil { + return err + } + m.Set(fd, v) + + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + } + return nil +} + +func (p *textParser) unmarshalExtensionOrAny(m protoreflect.Message, seen map[protoreflect.FieldNumber]bool) error { + name, err := p.consumeExtensionOrAnyName() + if err != nil { + return err + } + + // If it contains a slash, it's an Any type URL. + if slashIdx := strings.LastIndex(name, "/"); slashIdx >= 0 { + tok := p.next() + if tok.err != nil { + return tok.err + } + // consume an optional colon + if tok.value == ":" { + tok = p.next() + if tok.err != nil { + return tok.err + } + } + + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + + mt, err := protoregistry.GlobalTypes.FindMessageByURL(name) + if err != nil { + return p.errorf("unrecognized message %q in google.protobuf.Any", name[slashIdx+len("/"):]) + } + m2 := mt.New() + if err := p.unmarshalMessage(m2, terminator); err != nil { + return err + } + b, err := protoV2.Marshal(m2.Interface()) + if err != nil { + return p.errorf("failed to marshal message of type %q: %v", name[slashIdx+len("/"):], err) + } + + urlFD := m.Descriptor().Fields().ByName("type_url") + valFD := m.Descriptor().Fields().ByName("value") + if seen[urlFD.Number()] { + return p.errorf("Any message unpacked multiple times, or %q already set", urlFD.Name()) + } + if seen[valFD.Number()] { + return p.errorf("Any message unpacked multiple times, or %q already set", valFD.Name()) + } + m.Set(urlFD, protoreflect.ValueOfString(name)) + m.Set(valFD, protoreflect.ValueOfBytes(b)) + seen[urlFD.Number()] = true + seen[valFD.Number()] = true + return nil + } + + xname := protoreflect.FullName(name) + xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname) + if xt == nil && isMessageSet(m.Descriptor()) { + xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension")) + } + if xt == nil { + return p.errorf("unrecognized extension %q", name) + } + fd := xt.TypeDescriptor() + if fd.ContainingMessage().FullName() != m.Descriptor().FullName() { + return p.errorf("extension field %q does not extend message %q", 
name, m.Descriptor().FullName()) + } + + if err := p.checkForColon(fd); err != nil { + return err + } + + v := m.Get(fd) + if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) { + v = m.Mutable(fd) + } + v, err = p.unmarshalValue(v, fd) + if err != nil { + return err + } + m.Set(fd, v) + return p.consumeOptionalSeparator() +} + +func (p *textParser) unmarshalValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { + tok := p.next() + if tok.err != nil { + return v, tok.err + } + if tok.value == "" { + return v, p.errorf("unexpected EOF") + } + + switch { + case fd.IsList(): + lv := v.List() + var err error + if tok.value == "[" { + // Repeated field with list notation, like [1,2,3]. + for { + vv := lv.NewElement() + vv, err = p.unmarshalSingularValue(vv, fd) + if err != nil { + return v, err + } + lv.Append(vv) + + tok := p.next() + if tok.err != nil { + return v, tok.err + } + if tok.value == "]" { + break + } + if tok.value != "," { + return v, p.errorf("Expected ']' or ',' found %q", tok.value) + } + } + return v, nil + } + + // One value of the repeated field. + p.back() + vv := lv.NewElement() + vv, err = p.unmarshalSingularValue(vv, fd) + if err != nil { + return v, err + } + lv.Append(vv) + return v, nil + case fd.IsMap(): + // The map entry should be this sequence of tokens: + // < key : KEY value : VALUE > + // However, implementations may omit key or value, and technically + // we should support them in any order. + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return v, p.errorf("expected '{' or '<', found %q", tok.value) + } + + keyFD := fd.MapKey() + valFD := fd.MapValue() + + mv := v.Map() + kv := keyFD.Default() + vv := mv.NewValue() + for { + tok := p.next() + if tok.err != nil { + return v, tok.err + } + if tok.value == terminator { + break + } + var err error + switch tok.value { + case "key": + if err := p.consumeToken(":"); err != nil { + return v, err + } + if kv, err = p.unmarshalSingularValue(kv, keyFD); err != nil { + return v, err + } + if err := p.consumeOptionalSeparator(); err != nil { + return v, err + } + case "value": + if err := p.checkForColon(valFD); err != nil { + return v, err + } + if vv, err = p.unmarshalSingularValue(vv, valFD); err != nil { + return v, err + } + if err := p.consumeOptionalSeparator(); err != nil { + return v, err + } + default: + p.back() + return v, p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) + } + } + mv.Set(kv.MapKey(), vv) + return v, nil + default: + p.back() + return p.unmarshalSingularValue(v, fd) + } +} + +func (p *textParser) unmarshalSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { + tok := p.next() + if tok.err != nil { + return v, tok.err + } + if tok.value == "" { + return v, p.errorf("unexpected EOF") + } + + switch fd.Kind() { + case protoreflect.BoolKind: + switch tok.value { + case "true", "1", "t", "True": + return protoreflect.ValueOfBool(true), nil + case "false", "0", "f", "False": + return protoreflect.ValueOfBool(false), nil + } + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { + return protoreflect.ValueOfInt32(int32(x)), nil + } + + // The C++ parser accepts large positive hex numbers that uses + // two's complement arithmetic to represent negative numbers. 
+ // This feature is here for backwards compatibility with C++. + if strings.HasPrefix(tok.value, "0x") { + if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { + return protoreflect.ValueOfInt32(int32(-(int64(^x) + 1))), nil + } + } + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { + return protoreflect.ValueOfInt64(int64(x)), nil + } + + // The C++ parser accepts large positive hex numbers that uses + // two's complement arithmetic to represent negative numbers. + // This feature is here for backwards compatibility with C++. + if strings.HasPrefix(tok.value, "0x") { + if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { + return protoreflect.ValueOfInt64(int64(-(int64(^x) + 1))), nil + } + } + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { + return protoreflect.ValueOfUint32(uint32(x)), nil + } + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { + return protoreflect.ValueOfUint64(uint64(x)), nil + } + case protoreflect.FloatKind: + // Ignore 'f' for compatibility with output generated by C++, + // but don't remove 'f' when the value is "-inf" or "inf". + v := tok.value + if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" { + v = v[:len(v)-len("f")] + } + if x, err := strconv.ParseFloat(v, 32); err == nil { + return protoreflect.ValueOfFloat32(float32(x)), nil + } + case protoreflect.DoubleKind: + // Ignore 'f' for compatibility with output generated by C++, + // but don't remove 'f' when the value is "-inf" or "inf". + v := tok.value + if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" { + v = v[:len(v)-len("f")] + } + if x, err := strconv.ParseFloat(v, 64); err == nil { + return protoreflect.ValueOfFloat64(float64(x)), nil + } + case protoreflect.StringKind: + if isQuote(tok.value[0]) { + return protoreflect.ValueOfString(tok.unquoted), nil + } + case protoreflect.BytesKind: + if isQuote(tok.value[0]) { + return protoreflect.ValueOfBytes([]byte(tok.unquoted)), nil + } + case protoreflect.EnumKind: + if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { + return protoreflect.ValueOfEnum(protoreflect.EnumNumber(x)), nil + } + vd := fd.Enum().Values().ByName(protoreflect.Name(tok.value)) + if vd != nil { + return protoreflect.ValueOfEnum(vd.Number()), nil + } + case protoreflect.MessageKind, protoreflect.GroupKind: + var terminator string + switch tok.value { + case "{": + terminator = "}" + case "<": + terminator = ">" + default: + return v, p.errorf("expected '{' or '<', found %q", tok.value) + } + err := p.unmarshalMessage(v.Message(), terminator) + return v, err + default: + panic(fmt.Sprintf("invalid kind %v", fd.Kind())) + } + return v, p.errorf("invalid %v: %v", fd.Kind(), tok.value) +} + +// Consume a ':' from the input stream (if the next token is a colon), +// returning an error if a colon is needed but not present. +func (p *textParser) checkForColon(fd protoreflect.FieldDescriptor) *ParseError { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ":" { + if fd.Message() == nil { + return p.errorf("expected ':', found %q", tok.value) + } + p.back() + } + return nil +} + +// consumeExtensionOrAnyName consumes an extension name or an Any type URL and +// the following ']'. It returns the name or URL consumed. 
+func (p *textParser) consumeExtensionOrAnyName() (string, error) { + tok := p.next() + if tok.err != nil { + return "", tok.err + } + + // If extension name or type url is quoted, it's a single token. + if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { + name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) + if err != nil { + return "", err + } + return name, p.consumeToken("]") + } + + // Consume everything up to "]" + var parts []string + for tok.value != "]" { + parts = append(parts, tok.value) + tok = p.next() + if tok.err != nil { + return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) + } + if p.done && tok.value != "]" { + return "", p.errorf("unclosed type_url or extension name") + } + } + return strings.Join(parts, ""), nil +} + +// consumeOptionalSeparator consumes an optional semicolon or comma. +// It is used in unmarshalMessage to provide backward compatibility. +func (p *textParser) consumeOptionalSeparator() error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ";" && tok.value != "," { + p.back() + } + return nil +} + +func (p *textParser) errorf(format string, a ...interface{}) *ParseError { + pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} + p.cur.err = pe + p.done = true + return pe +} + +func (p *textParser) skipWhitespace() { + i := 0 + for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { + if p.s[i] == '#' { + // comment; skip to end of line or input + for i < len(p.s) && p.s[i] != '\n' { + i++ + } + if i == len(p.s) { + break + } + } + if p.s[i] == '\n' { + p.line++ + } + i++ + } + p.offset += i + p.s = p.s[i:len(p.s)] + if len(p.s) == 0 { + p.done = true + } +} + +func (p *textParser) advance() { + // Skip whitespace + p.skipWhitespace() + if p.done { + return + } + + // Start of non-whitespace + p.cur.err = nil + p.cur.offset, p.cur.line = p.offset, p.line + p.cur.unquoted = "" + switch p.s[0] { + case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': + // Single symbol + p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] + case '"', '\'': + // Quoted string + i := 1 + for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { + if p.s[i] == '\\' && i+1 < len(p.s) { + // skip escaped char + i++ + } + i++ + } + if i >= len(p.s) || p.s[i] != p.s[0] { + p.errorf("unmatched quote") + return + } + unq, err := unquoteC(p.s[1:i], rune(p.s[0])) + if err != nil { + p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) + return + } + p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] + p.cur.unquoted = unq + default: + i := 0 + for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { + i++ + } + if i == 0 { + p.errorf("unexpected byte %#x", p.s[0]) + return + } + p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] + } + p.offset += len(p.cur.value) +} + +// Back off the parser by one token. Can only be done between calls to next(). +// It makes the next advance() a no-op. +func (p *textParser) back() { p.backed = true } + +// Advances the parser and returns the new current token. +func (p *textParser) next() *token { + if p.backed || p.done { + p.backed = false + return &p.cur + } + p.advance() + if p.done { + p.cur.value = "" + } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { + // Look for multiple quoted strings separated by whitespace, + // and concatenate them. 
+ cat := p.cur + for { + p.skipWhitespace() + if p.done || !isQuote(p.s[0]) { + break + } + p.advance() + if p.cur.err != nil { + return &p.cur + } + cat.value += " " + p.cur.value + cat.unquoted += p.cur.unquoted + } + p.done = false // parser may have seen EOF, but we want to return cat + p.cur = cat + } + return &p.cur +} + +func (p *textParser) consumeToken(s string) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != s { + p.back() + return p.errorf("expected %q, found %q", s, tok.value) + } + return nil +} + +var errBadUTF8 = errors.New("proto: bad UTF-8") + +func unquoteC(s string, quote rune) (string, error) { + // This is based on C++'s tokenizer.cc. + // Despite its name, this is *not* parsing C syntax. + // For instance, "\0" is an invalid quoted string. + + // Avoid allocation in trivial cases. + simple := true + for _, r := range s { + if r == '\\' || r == quote { + simple = false + break + } + } + if simple { + return s, nil + } + + buf := make([]byte, 0, 3*len(s)/2) + for len(s) > 0 { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", errBadUTF8 + } + s = s[n:] + if r != '\\' { + if r < utf8.RuneSelf { + buf = append(buf, byte(r)) + } else { + buf = append(buf, string(r)...) + } + continue + } + + ch, tail, err := unescape(s) + if err != nil { + return "", err + } + buf = append(buf, ch...) + s = tail + } + return string(buf), nil +} + +func unescape(s string) (ch string, tail string, err error) { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", "", errBadUTF8 + } + s = s[n:] + switch r { + case 'a': + return "\a", s, nil + case 'b': + return "\b", s, nil + case 'f': + return "\f", s, nil + case 'n': + return "\n", s, nil + case 'r': + return "\r", s, nil + case 't': + return "\t", s, nil + case 'v': + return "\v", s, nil + case '?': + return "?", s, nil // trigraph workaround + case '\'', '"', '\\': + return string(r), s, nil + case '0', '1', '2', '3', '4', '5', '6', '7': + if len(s) < 2 { + return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) + } + ss := string(r) + s[:2] + s = s[2:] + i, err := strconv.ParseUint(ss, 8, 8) + if err != nil { + return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss) + } + return string([]byte{byte(i)}), s, nil + case 'x', 'X', 'u', 'U': + var n int + switch r { + case 'x', 'X': + n = 2 + case 'u': + n = 4 + case 'U': + n = 8 + } + if len(s) < n { + return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n) + } + ss := s[:n] + s = s[n:] + i, err := strconv.ParseUint(ss, 16, 64) + if err != nil { + return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss) + } + if r == 'x' || r == 'X' { + return string([]byte{byte(i)}), s, nil + } + if i > utf8.MaxRune { + return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss) + } + return string(i), s, nil + } + return "", "", fmt.Errorf(`unknown escape \%c`, r) +} + +func isIdentOrNumberChar(c byte) bool { + switch { + case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': + return true + case '0' <= c && c <= '9': + return true + } + switch c { + case '-', '+', '.', '_': + return true + } + return false +} + +func isWhitespace(c byte) bool { + switch c { + case ' ', '\t', '\n', '\r': + return true + } + return false +} + +func isQuote(c byte) bool { + switch c { + case '"', '\'': + return true + } + return false +} diff --git a/vendor/github.com/golang/protobuf/proto/text_encode.go b/vendor/github.com/golang/protobuf/proto/text_encode.go new 
file mode 100644 index 000000000..a31134eeb --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/text_encode.go @@ -0,0 +1,560 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "bytes" + "encoding" + "fmt" + "io" + "math" + "sort" + "strings" + + "google.golang.org/protobuf/encoding/prototext" + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +const wrapTextMarshalV2 = false + +// TextMarshaler is a configurable text format marshaler. +type TextMarshaler struct { + Compact bool // use compact text format (one line) + ExpandAny bool // expand google.protobuf.Any messages of known types +} + +// Marshal writes the proto text format of m to w. +func (tm *TextMarshaler) Marshal(w io.Writer, m Message) error { + b, err := tm.marshal(m) + if len(b) > 0 { + if _, err := w.Write(b); err != nil { + return err + } + } + return err +} + +// Text returns a proto text formatted string of m. +func (tm *TextMarshaler) Text(m Message) string { + b, _ := tm.marshal(m) + return string(b) +} + +func (tm *TextMarshaler) marshal(m Message) ([]byte, error) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() { + return []byte(""), nil + } + + if wrapTextMarshalV2 { + if m, ok := m.(encoding.TextMarshaler); ok { + return m.MarshalText() + } + + opts := prototext.MarshalOptions{ + AllowPartial: true, + EmitUnknown: true, + } + if !tm.Compact { + opts.Indent = " " + } + if !tm.ExpandAny { + opts.Resolver = (*protoregistry.Types)(nil) + } + return opts.Marshal(mr.Interface()) + } else { + w := &textWriter{ + compact: tm.Compact, + expandAny: tm.ExpandAny, + complete: true, + } + + if m, ok := m.(encoding.TextMarshaler); ok { + b, err := m.MarshalText() + if err != nil { + return nil, err + } + w.Write(b) + return w.buf, nil + } + + err := w.writeMessage(mr) + return w.buf, err + } +} + +var ( + defaultTextMarshaler = TextMarshaler{} + compactTextMarshaler = TextMarshaler{Compact: true} +) + +// MarshalText writes the proto text format of m to w. +func MarshalText(w io.Writer, m Message) error { return defaultTextMarshaler.Marshal(w, m) } + +// MarshalTextString returns a proto text formatted string of m. +func MarshalTextString(m Message) string { return defaultTextMarshaler.Text(m) } + +// CompactText writes the compact proto text format of m to w. +func CompactText(w io.Writer, m Message) error { return compactTextMarshaler.Marshal(w, m) } + +// CompactTextString returns a compact proto text formatted string of m. +func CompactTextString(m Message) string { return compactTextMarshaler.Text(m) } + +var ( + newline = []byte("\n") + endBraceNewline = []byte("}\n") + posInf = []byte("inf") + negInf = []byte("-inf") + nan = []byte("nan") +) + +// textWriter is an io.Writer that tracks its indentation level. +type textWriter struct { + compact bool // same as TextMarshaler.Compact + expandAny bool // same as TextMarshaler.ExpandAny + complete bool // whether the current position is a complete line + indent int // indentation level; never negative + buf []byte +} + +func (w *textWriter) Write(p []byte) (n int, _ error) { + newlines := bytes.Count(p, newline) + if newlines == 0 { + if !w.compact && w.complete { + w.writeIndent() + } + w.buf = append(w.buf, p...) 
+ w.complete = false + return len(p), nil + } + + frags := bytes.SplitN(p, newline, newlines+1) + if w.compact { + for i, frag := range frags { + if i > 0 { + w.buf = append(w.buf, ' ') + n++ + } + w.buf = append(w.buf, frag...) + n += len(frag) + } + return n, nil + } + + for i, frag := range frags { + if w.complete { + w.writeIndent() + } + w.buf = append(w.buf, frag...) + n += len(frag) + if i+1 < len(frags) { + w.buf = append(w.buf, '\n') + n++ + } + } + w.complete = len(frags[len(frags)-1]) == 0 + return n, nil +} + +func (w *textWriter) WriteByte(c byte) error { + if w.compact && c == '\n' { + c = ' ' + } + if !w.compact && w.complete { + w.writeIndent() + } + w.buf = append(w.buf, c) + w.complete = c == '\n' + return nil +} + +func (w *textWriter) writeName(fd protoreflect.FieldDescriptor) { + if !w.compact && w.complete { + w.writeIndent() + } + w.complete = false + + if fd.Kind() != protoreflect.GroupKind { + w.buf = append(w.buf, fd.Name()...) + w.WriteByte(':') + } else { + // Use message type name for group field name. + w.buf = append(w.buf, fd.Message().Name()...) + } + + if !w.compact { + w.WriteByte(' ') + } +} + +func requiresQuotes(u string) bool { + // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. + for _, ch := range u { + switch { + case ch == '.' || ch == '/' || ch == '_': + continue + case '0' <= ch && ch <= '9': + continue + case 'A' <= ch && ch <= 'Z': + continue + case 'a' <= ch && ch <= 'z': + continue + default: + return true + } + } + return false +} + +// writeProto3Any writes an expanded google.protobuf.Any message. +// +// It returns (false, nil) if sv value can't be unmarshaled (e.g. because +// required messages are not linked in). +// +// It returns (true, error) when sv was written in expanded format or an error +// was encountered. 
+func (w *textWriter) writeProto3Any(m protoreflect.Message) (bool, error) { + md := m.Descriptor() + fdURL := md.Fields().ByName("type_url") + fdVal := md.Fields().ByName("value") + + url := m.Get(fdURL).String() + mt, err := protoregistry.GlobalTypes.FindMessageByURL(url) + if err != nil { + return false, nil + } + + b := m.Get(fdVal).Bytes() + m2 := mt.New() + if err := proto.Unmarshal(b, m2.Interface()); err != nil { + return false, nil + } + w.Write([]byte("[")) + if requiresQuotes(url) { + w.writeQuotedString(url) + } else { + w.Write([]byte(url)) + } + if w.compact { + w.Write([]byte("]:<")) + } else { + w.Write([]byte("]: <\n")) + w.indent++ + } + if err := w.writeMessage(m2); err != nil { + return true, err + } + if w.compact { + w.Write([]byte("> ")) + } else { + w.indent-- + w.Write([]byte(">\n")) + } + return true, nil +} + +func (w *textWriter) writeMessage(m protoreflect.Message) error { + md := m.Descriptor() + if w.expandAny && md.FullName() == "google.protobuf.Any" { + if canExpand, err := w.writeProto3Any(m); canExpand { + return err + } + } + + fds := md.Fields() + for i := 0; i < fds.Len(); { + fd := fds.Get(i) + if od := fd.ContainingOneof(); od != nil { + fd = m.WhichOneof(od) + i += od.Fields().Len() + } else { + i++ + } + if fd == nil || !m.Has(fd) { + continue + } + + switch { + case fd.IsList(): + lv := m.Get(fd).List() + for j := 0; j < lv.Len(); j++ { + w.writeName(fd) + v := lv.Get(j) + if err := w.writeSingularValue(v, fd); err != nil { + return err + } + w.WriteByte('\n') + } + case fd.IsMap(): + kfd := fd.MapKey() + vfd := fd.MapValue() + mv := m.Get(fd).Map() + + type entry struct{ key, val protoreflect.Value } + var entries []entry + mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool { + entries = append(entries, entry{k.Value(), v}) + return true + }) + sort.Slice(entries, func(i, j int) bool { + switch kfd.Kind() { + case protoreflect.BoolKind: + return !entries[i].key.Bool() && entries[j].key.Bool() + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + return entries[i].key.Int() < entries[j].key.Int() + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + return entries[i].key.Uint() < entries[j].key.Uint() + case protoreflect.StringKind: + return entries[i].key.String() < entries[j].key.String() + default: + panic("invalid kind") + } + }) + for _, entry := range entries { + w.writeName(fd) + w.WriteByte('<') + if !w.compact { + w.WriteByte('\n') + } + w.indent++ + w.writeName(kfd) + if err := w.writeSingularValue(entry.key, kfd); err != nil { + return err + } + w.WriteByte('\n') + w.writeName(vfd) + if err := w.writeSingularValue(entry.val, vfd); err != nil { + return err + } + w.WriteByte('\n') + w.indent-- + w.WriteByte('>') + w.WriteByte('\n') + } + default: + w.writeName(fd) + if err := w.writeSingularValue(m.Get(fd), fd); err != nil { + return err + } + w.WriteByte('\n') + } + } + + if b := m.GetUnknown(); len(b) > 0 { + w.writeUnknownFields(b) + } + return w.writeExtensions(m) +} + +func (w *textWriter) writeSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) error { + switch fd.Kind() { + case protoreflect.FloatKind, protoreflect.DoubleKind: + switch vf := v.Float(); { + case math.IsInf(vf, +1): + w.Write(posInf) + case math.IsInf(vf, -1): + w.Write(negInf) + case math.IsNaN(vf): + w.Write(nan) + default: + fmt.Fprint(w, v.Interface()) + } + case 
protoreflect.StringKind: + // NOTE: This does not validate UTF-8 for historical reasons. + w.writeQuotedString(string(v.String())) + case protoreflect.BytesKind: + w.writeQuotedString(string(v.Bytes())) + case protoreflect.MessageKind, protoreflect.GroupKind: + var bra, ket byte = '<', '>' + if fd.Kind() == protoreflect.GroupKind { + bra, ket = '{', '}' + } + w.WriteByte(bra) + if !w.compact { + w.WriteByte('\n') + } + w.indent++ + m := v.Message() + if m2, ok := m.Interface().(encoding.TextMarshaler); ok { + b, err := m2.MarshalText() + if err != nil { + return err + } + w.Write(b) + } else { + w.writeMessage(m) + } + w.indent-- + w.WriteByte(ket) + case protoreflect.EnumKind: + if ev := fd.Enum().Values().ByNumber(v.Enum()); ev != nil { + fmt.Fprint(w, ev.Name()) + } else { + fmt.Fprint(w, v.Enum()) + } + default: + fmt.Fprint(w, v.Interface()) + } + return nil +} + +// writeQuotedString writes a quoted string in the protocol buffer text format. +func (w *textWriter) writeQuotedString(s string) { + w.WriteByte('"') + for i := 0; i < len(s); i++ { + switch c := s[i]; c { + case '\n': + w.buf = append(w.buf, `\n`...) + case '\r': + w.buf = append(w.buf, `\r`...) + case '\t': + w.buf = append(w.buf, `\t`...) + case '"': + w.buf = append(w.buf, `\"`...) + case '\\': + w.buf = append(w.buf, `\\`...) + default: + if isPrint := c >= 0x20 && c < 0x7f; isPrint { + w.buf = append(w.buf, c) + } else { + w.buf = append(w.buf, fmt.Sprintf(`\%03o`, c)...) + } + } + } + w.WriteByte('"') +} + +func (w *textWriter) writeUnknownFields(b []byte) { + if !w.compact { + fmt.Fprintf(w, "/* %d unknown bytes */\n", len(b)) + } + + for len(b) > 0 { + num, wtyp, n := protowire.ConsumeTag(b) + if n < 0 { + return + } + b = b[n:] + + if wtyp == protowire.EndGroupType { + w.indent-- + w.Write(endBraceNewline) + continue + } + fmt.Fprint(w, num) + if wtyp != protowire.StartGroupType { + w.WriteByte(':') + } + if !w.compact || wtyp == protowire.StartGroupType { + w.WriteByte(' ') + } + switch wtyp { + case protowire.VarintType: + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return + } + b = b[n:] + fmt.Fprint(w, v) + case protowire.Fixed32Type: + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return + } + b = b[n:] + fmt.Fprint(w, v) + case protowire.Fixed64Type: + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return + } + b = b[n:] + fmt.Fprint(w, v) + case protowire.BytesType: + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return + } + b = b[n:] + fmt.Fprintf(w, "%q", v) + case protowire.StartGroupType: + w.WriteByte('{') + w.indent++ + default: + fmt.Fprintf(w, "/* unknown wire type %d */", wtyp) + } + w.WriteByte('\n') + } +} + +// writeExtensions writes all the extensions in m. +func (w *textWriter) writeExtensions(m protoreflect.Message) error { + md := m.Descriptor() + if md.ExtensionRanges().Len() == 0 { + return nil + } + + type ext struct { + desc protoreflect.FieldDescriptor + val protoreflect.Value + } + var exts []ext + m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + if fd.IsExtension() { + exts = append(exts, ext{fd, v}) + } + return true + }) + sort.Slice(exts, func(i, j int) bool { + return exts[i].desc.Number() < exts[j].desc.Number() + }) + + for _, ext := range exts { + // For message set, use the name of the message as the extension name. 
+ name := string(ext.desc.FullName()) + if isMessageSet(ext.desc.ContainingMessage()) { + name = strings.TrimSuffix(name, ".message_set_extension") + } + + if !ext.desc.IsList() { + if err := w.writeSingularExtension(name, ext.val, ext.desc); err != nil { + return err + } + } else { + lv := ext.val.List() + for i := 0; i < lv.Len(); i++ { + if err := w.writeSingularExtension(name, lv.Get(i), ext.desc); err != nil { + return err + } + } + } + } + return nil +} + +func (w *textWriter) writeSingularExtension(name string, v protoreflect.Value, fd protoreflect.FieldDescriptor) error { + fmt.Fprintf(w, "[%s]:", name) + if !w.compact { + w.WriteByte(' ') + } + if err := w.writeSingularValue(v, fd); err != nil { + return err + } + w.WriteByte('\n') + return nil +} + +func (w *textWriter) writeIndent() { + if !w.complete { + return + } + for i := 0; i < w.indent*2; i++ { + w.buf = append(w.buf, ' ') + } + w.complete = false +} diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go deleted file mode 100644 index bb55a3af2..000000000 --- a/vendor/github.com/golang/protobuf/proto/text_parser.go +++ /dev/null @@ -1,880 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// Functions for parsing the Text protocol buffer format. -// TODO: message sets. 
- -import ( - "encoding" - "errors" - "fmt" - "reflect" - "strconv" - "strings" - "unicode/utf8" -) - -// Error string emitted when deserializing Any and fields are already set -const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set" - -type ParseError struct { - Message string - Line int // 1-based line number - Offset int // 0-based byte offset from start of input -} - -func (p *ParseError) Error() string { - if p.Line == 1 { - // show offset only for first line - return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) - } - return fmt.Sprintf("line %d: %v", p.Line, p.Message) -} - -type token struct { - value string - err *ParseError - line int // line number - offset int // byte number from start of input, not start of line - unquoted string // the unquoted version of value, if it was a quoted string -} - -func (t *token) String() string { - if t.err == nil { - return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) - } - return fmt.Sprintf("parse error: %v", t.err) -} - -type textParser struct { - s string // remaining input - done bool // whether the parsing is finished (success or error) - backed bool // whether back() was called - offset, line int - cur token -} - -func newTextParser(s string) *textParser { - p := new(textParser) - p.s = s - p.line = 1 - p.cur.line = 1 - return p -} - -func (p *textParser) errorf(format string, a ...interface{}) *ParseError { - pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} - p.cur.err = pe - p.done = true - return pe -} - -// Numbers and identifiers are matched by [-+._A-Za-z0-9] -func isIdentOrNumberChar(c byte) bool { - switch { - case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': - return true - case '0' <= c && c <= '9': - return true - } - switch c { - case '-', '+', '.', '_': - return true - } - return false -} - -func isWhitespace(c byte) bool { - switch c { - case ' ', '\t', '\n', '\r': - return true - } - return false -} - -func isQuote(c byte) bool { - switch c { - case '"', '\'': - return true - } - return false -} - -func (p *textParser) skipWhitespace() { - i := 0 - for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { - if p.s[i] == '#' { - // comment; skip to end of line or input - for i < len(p.s) && p.s[i] != '\n' { - i++ - } - if i == len(p.s) { - break - } - } - if p.s[i] == '\n' { - p.line++ - } - i++ - } - p.offset += i - p.s = p.s[i:len(p.s)] - if len(p.s) == 0 { - p.done = true - } -} - -func (p *textParser) advance() { - // Skip whitespace - p.skipWhitespace() - if p.done { - return - } - - // Start of non-whitespace - p.cur.err = nil - p.cur.offset, p.cur.line = p.offset, p.line - p.cur.unquoted = "" - switch p.s[0] { - case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': - // Single symbol - p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] - case '"', '\'': - // Quoted string - i := 1 - for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { - if p.s[i] == '\\' && i+1 < len(p.s) { - // skip escaped char - i++ - } - i++ - } - if i >= len(p.s) || p.s[i] != p.s[0] { - p.errorf("unmatched quote") - return - } - unq, err := unquoteC(p.s[1:i], rune(p.s[0])) - if err != nil { - p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) - return - } - p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] - p.cur.unquoted = unq - default: - i := 0 - for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { - i++ - } - if i == 0 { - p.errorf("unexpected byte %#x", p.s[0]) - return - } - p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] - } - p.offset += len(p.cur.value) -} 
- -var ( - errBadUTF8 = errors.New("proto: bad UTF-8") -) - -func unquoteC(s string, quote rune) (string, error) { - // This is based on C++'s tokenizer.cc. - // Despite its name, this is *not* parsing C syntax. - // For instance, "\0" is an invalid quoted string. - - // Avoid allocation in trivial cases. - simple := true - for _, r := range s { - if r == '\\' || r == quote { - simple = false - break - } - } - if simple { - return s, nil - } - - buf := make([]byte, 0, 3*len(s)/2) - for len(s) > 0 { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", errBadUTF8 - } - s = s[n:] - if r != '\\' { - if r < utf8.RuneSelf { - buf = append(buf, byte(r)) - } else { - buf = append(buf, string(r)...) - } - continue - } - - ch, tail, err := unescape(s) - if err != nil { - return "", err - } - buf = append(buf, ch...) - s = tail - } - return string(buf), nil -} - -func unescape(s string) (ch string, tail string, err error) { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", "", errBadUTF8 - } - s = s[n:] - switch r { - case 'a': - return "\a", s, nil - case 'b': - return "\b", s, nil - case 'f': - return "\f", s, nil - case 'n': - return "\n", s, nil - case 'r': - return "\r", s, nil - case 't': - return "\t", s, nil - case 'v': - return "\v", s, nil - case '?': - return "?", s, nil // trigraph workaround - case '\'', '"', '\\': - return string(r), s, nil - case '0', '1', '2', '3', '4', '5', '6', '7': - if len(s) < 2 { - return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) - } - ss := string(r) + s[:2] - s = s[2:] - i, err := strconv.ParseUint(ss, 8, 8) - if err != nil { - return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss) - } - return string([]byte{byte(i)}), s, nil - case 'x', 'X', 'u', 'U': - var n int - switch r { - case 'x', 'X': - n = 2 - case 'u': - n = 4 - case 'U': - n = 8 - } - if len(s) < n { - return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n) - } - ss := s[:n] - s = s[n:] - i, err := strconv.ParseUint(ss, 16, 64) - if err != nil { - return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss) - } - if r == 'x' || r == 'X' { - return string([]byte{byte(i)}), s, nil - } - if i > utf8.MaxRune { - return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss) - } - return string(i), s, nil - } - return "", "", fmt.Errorf(`unknown escape \%c`, r) -} - -// Back off the parser by one token. Can only be done between calls to next(). -// It makes the next advance() a no-op. -func (p *textParser) back() { p.backed = true } - -// Advances the parser and returns the new current token. -func (p *textParser) next() *token { - if p.backed || p.done { - p.backed = false - return &p.cur - } - p.advance() - if p.done { - p.cur.value = "" - } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { - // Look for multiple quoted strings separated by whitespace, - // and concatenate them. 
- cat := p.cur - for { - p.skipWhitespace() - if p.done || !isQuote(p.s[0]) { - break - } - p.advance() - if p.cur.err != nil { - return &p.cur - } - cat.value += " " + p.cur.value - cat.unquoted += p.cur.unquoted - } - p.done = false // parser may have seen EOF, but we want to return cat - p.cur = cat - } - return &p.cur -} - -func (p *textParser) consumeToken(s string) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != s { - p.back() - return p.errorf("expected %q, found %q", s, tok.value) - } - return nil -} - -// Return a RequiredNotSetError indicating which required field was not set. -func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { - st := sv.Type() - sprops := GetProperties(st) - for i := 0; i < st.NumField(); i++ { - if !isNil(sv.Field(i)) { - continue - } - - props := sprops.Prop[i] - if props.Required { - return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} - } - } - return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen -} - -// Returns the index in the struct for the named field, as well as the parsed tag properties. -func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) { - i, ok := sprops.decoderOrigNames[name] - if ok { - return i, sprops.Prop[i], true - } - return -1, nil, false -} - -// Consume a ':' from the input stream (if the next token is a colon), -// returning an error if a colon is needed but not present. -func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ":" { - // Colon is optional when the field is a group or message. - needColon := true - switch props.Wire { - case "group": - needColon = false - case "bytes": - // A "bytes" field is either a message, a string, or a repeated field; - // those three become *T, *string and []T respectively, so we can check for - // this field being a pointer to a non-string. - if typ.Kind() == reflect.Ptr { - // *T or *string - if typ.Elem().Kind() == reflect.String { - break - } - } else if typ.Kind() == reflect.Slice { - // []T or []*T - if typ.Elem().Kind() != reflect.Ptr { - break - } - } else if typ.Kind() == reflect.String { - // The proto3 exception is for a string field, - // which requires a colon. - break - } - needColon = false - } - if needColon { - return p.errorf("expected ':', found %q", tok.value) - } - p.back() - } - return nil -} - -func (p *textParser) readStruct(sv reflect.Value, terminator string) error { - st := sv.Type() - sprops := GetProperties(st) - reqCount := sprops.reqCount - var reqFieldErr error - fieldSet := make(map[string]bool) - // A struct is a sequence of "name: value", terminated by one of - // '>' or '}', or the end of the input. A name may also be - // "[extension]" or "[type/url]". - // - // The whole struct can also be an expanded Any message, like: - // [type/url] < ... struct contents ... > - for { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == terminator { - break - } - if tok.value == "[" { - // Looks like an extension or an Any. - // - // TODO: Check whether we need to handle - // namespace rooted names (e.g. ".something.Foo"). - extName, err := p.consumeExtName() - if err != nil { - return err - } - - if s := strings.LastIndex(extName, "/"); s >= 0 { - // If it contains a slash, it's an Any type URL. 
- messageName := extName[s+1:] - mt := MessageType(messageName) - if mt == nil { - return p.errorf("unrecognized message %q in google.protobuf.Any", messageName) - } - tok = p.next() - if tok.err != nil { - return tok.err - } - // consume an optional colon - if tok.value == ":" { - tok = p.next() - if tok.err != nil { - return tok.err - } - } - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - v := reflect.New(mt.Elem()) - if pe := p.readStruct(v.Elem(), terminator); pe != nil { - return pe - } - b, err := Marshal(v.Interface().(Message)) - if err != nil { - return p.errorf("failed to marshal message of type %q: %v", messageName, err) - } - if fieldSet["type_url"] { - return p.errorf(anyRepeatedlyUnpacked, "type_url") - } - if fieldSet["value"] { - return p.errorf(anyRepeatedlyUnpacked, "value") - } - sv.FieldByName("TypeUrl").SetString(extName) - sv.FieldByName("Value").SetBytes(b) - fieldSet["type_url"] = true - fieldSet["value"] = true - continue - } - - var desc *ExtensionDesc - // This could be faster, but it's functional. - // TODO: Do something smarter than a linear scan. - for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { - if d.Name == extName { - desc = d - break - } - } - if desc == nil { - return p.errorf("unrecognized extension %q", extName) - } - - props := &Properties{} - props.Parse(desc.Tag) - - typ := reflect.TypeOf(desc.ExtensionType) - if err := p.checkForColon(props, typ); err != nil { - return err - } - - rep := desc.repeated() - - // Read the extension structure, and set it in - // the value we're constructing. - var ext reflect.Value - if !rep { - ext = reflect.New(typ).Elem() - } else { - ext = reflect.New(typ.Elem()).Elem() - } - if err := p.readAny(ext, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } - ep := sv.Addr().Interface().(Message) - if !rep { - SetExtension(ep, desc, ext.Interface()) - } else { - old, err := GetExtension(ep, desc) - var sl reflect.Value - if err == nil { - sl = reflect.ValueOf(old) // existing slice - } else { - sl = reflect.MakeSlice(typ, 0, 1) - } - sl = reflect.Append(sl, ext) - SetExtension(ep, desc, sl.Interface()) - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - continue - } - - // This is a normal, non-extension field. - name := tok.value - var dst reflect.Value - fi, props, ok := structFieldByName(sprops, name) - if ok { - dst = sv.Field(fi) - } else if oop, ok := sprops.OneofTypes[name]; ok { - // It is a oneof. - props = oop.Prop - nv := reflect.New(oop.Type.Elem()) - dst = nv.Elem().Field(0) - field := sv.Field(oop.Field) - if !field.IsNil() { - return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name) - } - field.Set(nv) - } - if !dst.IsValid() { - return p.errorf("unknown field name %q in %v", name, st) - } - - if dst.Kind() == reflect.Map { - // Consume any colon. - if err := p.checkForColon(props, dst.Type()); err != nil { - return err - } - - // Construct the map if it doesn't already exist. 
- if dst.IsNil() { - dst.Set(reflect.MakeMap(dst.Type())) - } - key := reflect.New(dst.Type().Key()).Elem() - val := reflect.New(dst.Type().Elem()).Elem() - - // The map entry should be this sequence of tokens: - // < key : KEY value : VALUE > - // However, implementations may omit key or value, and technically - // we should support them in any order. See b/28924776 for a time - // this went wrong. - - tok := p.next() - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - for { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == terminator { - break - } - switch tok.value { - case "key": - if err := p.consumeToken(":"); err != nil { - return err - } - if err := p.readAny(key, props.MapKeyProp); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - case "value": - if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil { - return err - } - if err := p.readAny(val, props.MapValProp); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - default: - p.back() - return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) - } - } - - dst.SetMapIndex(key, val) - continue - } - - // Check that it's not already set if it's not a repeated field. - if !props.Repeated && fieldSet[name] { - return p.errorf("non-repeated field %q was repeated", name) - } - - if err := p.checkForColon(props, dst.Type()); err != nil { - return err - } - - // Parse into the field. - fieldSet[name] = true - if err := p.readAny(dst, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } - if props.Required { - reqCount-- - } - - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - - } - - if reqCount > 0 { - return p.missingRequiredFieldError(sv) - } - return reqFieldErr -} - -// consumeExtName consumes extension name or expanded Any type URL and the -// following ']'. It returns the name or URL consumed. -func (p *textParser) consumeExtName() (string, error) { - tok := p.next() - if tok.err != nil { - return "", tok.err - } - - // If extension name or type url is quoted, it's a single token. - if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { - name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) - if err != nil { - return "", err - } - return name, p.consumeToken("]") - } - - // Consume everything up to "]" - var parts []string - for tok.value != "]" { - parts = append(parts, tok.value) - tok = p.next() - if tok.err != nil { - return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) - } - if p.done && tok.value != "]" { - return "", p.errorf("unclosed type_url or extension name") - } - } - return strings.Join(parts, ""), nil -} - -// consumeOptionalSeparator consumes an optional semicolon or comma. -// It is used in readStruct to provide backward compatibility. 
-func (p *textParser) consumeOptionalSeparator() error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ";" && tok.value != "," { - p.back() - } - return nil -} - -func (p *textParser) readAny(v reflect.Value, props *Properties) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == "" { - return p.errorf("unexpected EOF") - } - - switch fv := v; fv.Kind() { - case reflect.Slice: - at := v.Type() - if at.Elem().Kind() == reflect.Uint8 { - // Special case for []byte - if tok.value[0] != '"' && tok.value[0] != '\'' { - // Deliberately written out here, as the error after - // this switch statement would write "invalid []byte: ...", - // which is not as user-friendly. - return p.errorf("invalid string: %v", tok.value) - } - bytes := []byte(tok.unquoted) - fv.Set(reflect.ValueOf(bytes)) - return nil - } - // Repeated field. - if tok.value == "[" { - // Repeated field with list notation, like [1,2,3]. - for { - fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) - err := p.readAny(fv.Index(fv.Len()-1), props) - if err != nil { - return err - } - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == "]" { - break - } - if tok.value != "," { - return p.errorf("Expected ']' or ',' found %q", tok.value) - } - } - return nil - } - // One value of the repeated field. - p.back() - fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) - return p.readAny(fv.Index(fv.Len()-1), props) - case reflect.Bool: - // true/1/t/True or false/f/0/False. - switch tok.value { - case "true", "1", "t", "True": - fv.SetBool(true) - return nil - case "false", "0", "f", "False": - fv.SetBool(false) - return nil - } - case reflect.Float32, reflect.Float64: - v := tok.value - // Ignore 'f' for compatibility with output generated by C++, but don't - // remove 'f' when the value is "-inf" or "inf". - if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { - v = v[:len(v)-1] - } - if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { - fv.SetFloat(f) - return nil - } - case reflect.Int32: - if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { - fv.SetInt(x) - return nil - } - - if len(props.Enum) == 0 { - break - } - m, ok := enumValueMaps[props.Enum] - if !ok { - break - } - x, ok := m[tok.value] - if !ok { - break - } - fv.SetInt(int64(x)) - return nil - case reflect.Int64: - if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { - fv.SetInt(x) - return nil - } - - case reflect.Ptr: - // A basic field (indirected through pointer), or a repeated message/group - p.back() - fv.Set(reflect.New(fv.Type().Elem())) - return p.readAny(fv.Elem(), props) - case reflect.String: - if tok.value[0] == '"' || tok.value[0] == '\'' { - fv.SetString(tok.unquoted) - return nil - } - case reflect.Struct: - var terminator string - switch tok.value { - case "{": - terminator = "}" - case "<": - terminator = ">" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - // TODO: Handle nested messages which implement encoding.TextUnmarshaler. - return p.readStruct(fv, terminator) - case reflect.Uint32: - if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { - fv.SetUint(uint64(x)) - return nil - } - case reflect.Uint64: - if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { - fv.SetUint(x) - return nil - } - } - return p.errorf("invalid %v: %v", v.Type(), tok.value) -} - -// UnmarshalText reads a protocol buffer in Text format. 
UnmarshalText resets pb -// before starting to unmarshal, so any existing data in pb is always removed. -// If a required field is not set and no other error occurs, -// UnmarshalText returns *RequiredNotSetError. -func UnmarshalText(s string, pb Message) error { - if um, ok := pb.(encoding.TextUnmarshaler); ok { - return um.UnmarshalText([]byte(s)) - } - pb.Reset() - v := reflect.ValueOf(pb) - return newTextParser(s).readStruct(v.Elem(), "") -} diff --git a/vendor/github.com/golang/protobuf/proto/wire.go b/vendor/github.com/golang/protobuf/proto/wire.go new file mode 100644 index 000000000..d7c28da5a --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/wire.go @@ -0,0 +1,78 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + protoV2 "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/runtime/protoiface" +) + +// Size returns the size in bytes of the wire-format encoding of m. +func Size(m Message) int { + if m == nil { + return 0 + } + mi := MessageV2(m) + return protoV2.Size(mi) +} + +// Marshal returns the wire-format encoding of m. +func Marshal(m Message) ([]byte, error) { + b, err := marshalAppend(nil, m, false) + if b == nil { + b = zeroBytes + } + return b, err +} + +var zeroBytes = make([]byte, 0, 0) + +func marshalAppend(buf []byte, m Message, deterministic bool) ([]byte, error) { + if m == nil { + return nil, ErrNil + } + mi := MessageV2(m) + nbuf, err := protoV2.MarshalOptions{ + Deterministic: deterministic, + AllowPartial: true, + }.MarshalAppend(buf, mi) + if err != nil { + return buf, err + } + if len(buf) == len(nbuf) { + if !mi.ProtoReflect().IsValid() { + return buf, ErrNil + } + } + return nbuf, checkRequiredNotSet(mi) +} + +// Unmarshal parses a wire-format message in b and places the decoded results in m. +// +// Unmarshal resets m before starting to unmarshal, so any existing data in m is always +// removed. Use UnmarshalMerge to preserve and append to existing data. +func Unmarshal(b []byte, m Message) error { + m.Reset() + return UnmarshalMerge(b, m) +} + +// UnmarshalMerge parses a wire-format message in b and places the decoded results in m. +func UnmarshalMerge(b []byte, m Message) error { + mi := MessageV2(m) + out, err := protoV2.UnmarshalOptions{ + AllowPartial: true, + Merge: true, + }.UnmarshalState(protoiface.UnmarshalInput{ + Buf: b, + Message: mi.ProtoReflect(), + }) + if err != nil { + return err + } + if out.Flags&protoiface.UnmarshalInitialized > 0 { + return nil + } + return checkRequiredNotSet(mi) +} diff --git a/vendor/github.com/golang/protobuf/proto/wrappers.go b/vendor/github.com/golang/protobuf/proto/wrappers.go new file mode 100644 index 000000000..398e34859 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/wrappers.go @@ -0,0 +1,34 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +// Bool stores v in a new bool value and returns a pointer to it. +func Bool(v bool) *bool { return &v } + +// Int stores v in a new int32 value and returns a pointer to it. +// +// Deprecated: Use Int32 instead. +func Int(v int) *int32 { return Int32(int32(v)) } + +// Int32 stores v in a new int32 value and returns a pointer to it. +func Int32(v int32) *int32 { return &v } + +// Int64 stores v in a new int64 value and returns a pointer to it. 
+func Int64(v int64) *int64 { return &v } + +// Uint32 stores v in a new uint32 value and returns a pointer to it. +func Uint32(v uint32) *uint32 { return &v } + +// Uint64 stores v in a new uint64 value and returns a pointer to it. +func Uint64(v uint64) *uint64 { return &v } + +// Float32 stores v in a new float32 value and returns a pointer to it. +func Float32(v float32) *float32 { return &v } + +// Float64 stores v in a new float64 value and returns a pointer to it. +func Float64(v float64) *float64 { return &v } + +// String stores v in a new string value and returns a pointer to it. +func String(v string) *string { return &v } diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go index 70276e8f5..e729dcff1 100644 --- a/vendor/github.com/golang/protobuf/ptypes/any.go +++ b/vendor/github.com/golang/protobuf/ptypes/any.go @@ -1,141 +1,165 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. package ptypes -// This file implements functions to marshal proto.Message to/from -// google.protobuf.Any message. - import ( "fmt" - "reflect" "strings" "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes/any" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + + anypb "github.com/golang/protobuf/ptypes/any" ) -const googleApis = "type.googleapis.com/" +const urlPrefix = "type.googleapis.com/" -// AnyMessageName returns the name of the message contained in a google.protobuf.Any message. -// -// Note that regular type assertions should be done using the Is -// function. 
AnyMessageName is provided for less common use cases like filtering a -// sequence of Any messages based on a set of allowed message type names. -func AnyMessageName(any *any.Any) (string, error) { +// AnyMessageName returns the message name contained in an anypb.Any message. +// Most type assertions should use the Is function instead. +func AnyMessageName(any *anypb.Any) (string, error) { + name, err := anyMessageName(any) + return string(name), err +} +func anyMessageName(any *anypb.Any) (protoreflect.FullName, error) { if any == nil { return "", fmt.Errorf("message is nil") } - slash := strings.LastIndex(any.TypeUrl, "/") - if slash < 0 { + name := protoreflect.FullName(any.TypeUrl) + if i := strings.LastIndex(any.TypeUrl, "/"); i >= 0 { + name = name[i+len("/"):] + } + if !name.IsValid() { return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl) } - return any.TypeUrl[slash+1:], nil + return name, nil } -// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any. -func MarshalAny(pb proto.Message) (*any.Any, error) { - value, err := proto.Marshal(pb) +// MarshalAny marshals the given message m into an anypb.Any message. +func MarshalAny(m proto.Message) (*anypb.Any, error) { + switch dm := m.(type) { + case DynamicAny: + m = dm.Message + case *DynamicAny: + if dm == nil { + return nil, proto.ErrNil + } + m = dm.Message + } + b, err := proto.Marshal(m) if err != nil { return nil, err } - return &any.Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil -} - -// DynamicAny is a value that can be passed to UnmarshalAny to automatically -// allocate a proto.Message for the type specified in a google.protobuf.Any -// message. The allocated message is stored in the embedded proto.Message. -// -// Example: -// -// var x ptypes.DynamicAny -// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } -// fmt.Printf("unmarshaled message: %v", x.Message) -type DynamicAny struct { - proto.Message + return &anypb.Any{TypeUrl: urlPrefix + proto.MessageName(m), Value: b}, nil } -// Empty returns a new proto.Message of the type specified in a -// google.protobuf.Any message. It returns an error if corresponding message -// type isn't linked in. -func Empty(any *any.Any) (proto.Message, error) { - aname, err := AnyMessageName(any) +// Empty returns a new message of the type specified in an anypb.Any message. +// It returns protoregistry.NotFound if the corresponding message type could not +// be resolved in the global registry. +func Empty(any *anypb.Any) (proto.Message, error) { + name, err := anyMessageName(any) if err != nil { return nil, err } - - t := proto.MessageType(aname) - if t == nil { - return nil, fmt.Errorf("any: message type %q isn't linked in", aname) + mt, err := protoregistry.GlobalTypes.FindMessageByName(name) + if err != nil { + return nil, err } - return reflect.New(t.Elem()).Interface().(proto.Message), nil + return proto.MessageV1(mt.New().Interface()), nil } -// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any -// message and places the decoded result in pb. It returns an error if type of -// contents of Any message does not match type of pb message. +// UnmarshalAny unmarshals the encoded value contained in the anypb.Any message +// into the provided message m. It returns an error if the target message +// does not match the type in the Any message or if an unmarshal error occurs. // -// pb can be a proto.Message, or a *DynamicAny. 
-func UnmarshalAny(any *any.Any, pb proto.Message) error { - if d, ok := pb.(*DynamicAny); ok { - if d.Message == nil { +// The target message m may be a *DynamicAny message. If the underlying message +// type could not be resolved, then this returns protoregistry.NotFound. +func UnmarshalAny(any *anypb.Any, m proto.Message) error { + if dm, ok := m.(*DynamicAny); ok { + if dm.Message == nil { var err error - d.Message, err = Empty(any) + dm.Message, err = Empty(any) if err != nil { return err } } - return UnmarshalAny(any, d.Message) + m = dm.Message } - aname, err := AnyMessageName(any) + anyName, err := AnyMessageName(any) if err != nil { return err } - - mname := proto.MessageName(pb) - if aname != mname { - return fmt.Errorf("mismatched message type: got %q want %q", aname, mname) + msgName := proto.MessageName(m) + if anyName != msgName { + return fmt.Errorf("mismatched message type: got %q want %q", anyName, msgName) } - return proto.Unmarshal(any.Value, pb) + return proto.Unmarshal(any.Value, m) } -// Is returns true if any value contains a given message type. -func Is(any *any.Any, pb proto.Message) bool { - // The following is equivalent to AnyMessageName(any) == proto.MessageName(pb), - // but it avoids scanning TypeUrl for the slash. - if any == nil { +// Is reports whether the Any message contains a message of the specified type. +func Is(any *anypb.Any, m proto.Message) bool { + if any == nil || m == nil { return false } - name := proto.MessageName(pb) - prefix := len(any.TypeUrl) - len(name) - return prefix >= 1 && any.TypeUrl[prefix-1] == '/' && any.TypeUrl[prefix:] == name + name := proto.MessageName(m) + if !strings.HasSuffix(any.TypeUrl, name) { + return false + } + return len(any.TypeUrl) == len(name) || any.TypeUrl[len(any.TypeUrl)-len(name)-1] == '/' +} + +// DynamicAny is a value that can be passed to UnmarshalAny to automatically +// allocate a proto.Message for the type specified in an anypb.Any message. +// The allocated message is stored in the embedded proto.Message. +// +// Example: +// var x ptypes.DynamicAny +// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... 
} +// fmt.Printf("unmarshaled message: %v", x.Message) +type DynamicAny struct{ proto.Message } + +func (m DynamicAny) String() string { + if m.Message == nil { + return "" + } + return m.Message.String() +} +func (m DynamicAny) Reset() { + if m.Message == nil { + return + } + m.Message.Reset() +} +func (m DynamicAny) ProtoMessage() { + return +} +func (m DynamicAny) ProtoReflect() protoreflect.Message { + if m.Message == nil { + return nil + } + return dynamicAny{proto.MessageReflect(m.Message)} +} + +type dynamicAny struct{ protoreflect.Message } + +func (m dynamicAny) Type() protoreflect.MessageType { + return dynamicAnyType{m.Message.Type()} +} +func (m dynamicAny) New() protoreflect.Message { + return dynamicAnyType{m.Message.Type()}.New() +} +func (m dynamicAny) Interface() protoreflect.ProtoMessage { + return DynamicAny{proto.MessageV1(m.Message.Interface())} +} + +type dynamicAnyType struct{ protoreflect.MessageType } + +func (t dynamicAnyType) New() protoreflect.Message { + return dynamicAny{t.MessageType.New()} +} +func (t dynamicAnyType) Zero() protoreflect.Message { + return dynamicAny{t.MessageType.Zero()} } diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go index 78ee52334..0ef27d33d 100644 --- a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go +++ b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go @@ -1,200 +1,62 @@ // Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/protobuf/any.proto +// source: github.com/golang/protobuf/ptypes/any/any.proto package any import ( - fmt "fmt" - proto "github.com/golang/protobuf/proto" - math "math" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + reflect "reflect" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf +// Symbols defined in public import of google/protobuf/any.proto. -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +type Any = anypb.Any -// `Any` contains an arbitrary serialized protocol buffer message along with a -// URL that describes the type of the serialized message. -// -// Protobuf library provides support to pack/unpack Any values in the form -// of utility functions or additional generated methods of the Any type. -// -// Example 1: Pack and unpack a message in C++. -// -// Foo foo = ...; -// Any any; -// any.PackFrom(foo); -// ... -// if (any.UnpackTo(&foo)) { -// ... -// } -// -// Example 2: Pack and unpack a message in Java. -// -// Foo foo = ...; -// Any any = Any.pack(foo); -// ... -// if (any.is(Foo.class)) { -// foo = any.unpack(Foo.class); -// } -// -// Example 3: Pack and unpack a message in Python. -// -// foo = Foo(...) -// any = Any() -// any.Pack(foo) -// ... -// if any.Is(Foo.DESCRIPTOR): -// any.Unpack(foo) -// ... -// -// Example 4: Pack and unpack a message in Go -// -// foo := &pb.Foo{...} -// any, err := ptypes.MarshalAny(foo) -// ... -// foo := &pb.Foo{} -// if err := ptypes.UnmarshalAny(any, foo); err != nil { -// ... 
-// } -// -// The pack methods provided by protobuf library will by default use -// 'type.googleapis.com/full.type.name' as the type URL and the unpack -// methods only use the fully qualified type name after the last '/' -// in the type URL, for example "foo.bar.com/x/y.z" will yield type -// name "y.z". -// -// -// JSON -// ==== -// The JSON representation of an `Any` value uses the regular -// representation of the deserialized, embedded message, with an -// additional field `@type` which contains the type URL. Example: -// -// package google.profile; -// message Person { -// string first_name = 1; -// string last_name = 2; -// } -// -// { -// "@type": "type.googleapis.com/google.profile.Person", -// "firstName": , -// "lastName": -// } -// -// If the embedded message type is well-known and has a custom JSON -// representation, that representation will be embedded adding a field -// `value` which holds the custom JSON in addition to the `@type` -// field. Example (for message [google.protobuf.Duration][]): -// -// { -// "@type": "type.googleapis.com/google.protobuf.Duration", -// "value": "1.212s" -// } -// -type Any struct { - // A URL/resource name that uniquely identifies the type of the serialized - // protocol buffer message. The last segment of the URL's path must represent - // the fully qualified name of the type (as in - // `path/google.protobuf.Duration`). The name should be in a canonical form - // (e.g., leading "." is not accepted). - // - // In practice, teams usually precompile into the binary all types that they - // expect it to use in the context of Any. However, for URLs which use the - // scheme `http`, `https`, or no scheme, one can optionally set up a type - // server that maps type URLs to message definitions as follows: - // - // * If no scheme is provided, `https` is assumed. - // * An HTTP GET on the URL must yield a [google.protobuf.Type][] - // value in binary format, or produce an error. - // * Applications are allowed to cache lookup results based on the - // URL, or have them precompiled into a binary to avoid any - // lookup. Therefore, binary compatibility needs to be preserved - // on changes to types. (Use versioned type names to manage - // breaking changes.) - // - // Note: this functionality is not currently available in the official - // protobuf release, and it is not used for type URLs beginning with - // type.googleapis.com. - // - // Schemes other than `http`, `https` (or the empty scheme) might be - // used with implementation specific semantics. - // - TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` - // Must be a valid serialized protocol buffer of the above specified type. 
- Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} +var File_github_com_golang_protobuf_ptypes_any_any_proto protoreflect.FileDescriptor -func (m *Any) Reset() { *m = Any{} } -func (m *Any) String() string { return proto.CompactTextString(m) } -func (*Any) ProtoMessage() {} -func (*Any) Descriptor() ([]byte, []int) { - return fileDescriptor_b53526c13ae22eb4, []int{0} +var file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = []byte{ + 0x0a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x2b, 0x5a, 0x29, + 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, + 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x3b, 0x61, 0x6e, 0x79, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, } -func (*Any) XXX_WellKnownType() string { return "Any" } - -func (m *Any) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Any.Unmarshal(m, b) -} -func (m *Any) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Any.Marshal(b, m, deterministic) -} -func (m *Any) XXX_Merge(src proto.Message) { - xxx_messageInfo_Any.Merge(m, src) -} -func (m *Any) XXX_Size() int { - return xxx_messageInfo_Any.Size(m) -} -func (m *Any) XXX_DiscardUnknown() { - xxx_messageInfo_Any.DiscardUnknown(m) +var file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = []interface{}{} +var file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name } -var xxx_messageInfo_Any proto.InternalMessageInfo - -func (m *Any) GetTypeUrl() string { - if m != nil { - return m.TypeUrl +func init() { file_github_com_golang_protobuf_ptypes_any_any_proto_init() } +func file_github_com_golang_protobuf_ptypes_any_any_proto_init() { + if File_github_com_golang_protobuf_ptypes_any_any_proto != nil { + return } - return "" -} - -func (m *Any) GetValue() []byte { - if m != nil { - return m.Value - } - return nil -} - -func init() { - proto.RegisterType((*Any)(nil), "google.protobuf.Any") -} - -func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_b53526c13ae22eb4) } - -var fileDescriptor_b53526c13ae22eb4 = []byte{ - // 185 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f, - 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4, - 0x03, 0x73, 0x84, 0xf8, 0x21, 0x52, 0x7a, 0x30, 0x29, 0x25, 0x33, 0x2e, 0x66, 0xc7, 0xbc, 0x4a, - 0x21, 0x49, 0x2e, 0x8e, 0x92, 0xca, 0x82, 0xd4, 0xf8, 0xd2, 0xa2, 0x1c, 0x09, 0x46, 0x05, 0x46, - 0x0d, 0xce, 0x20, 0x76, 0x10, 0x3f, 0xb4, 0x28, 0x47, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 
0x31, 0xa7, - 0x34, 0x55, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xc2, 0x71, 0xca, 0xe7, 0x12, 0x4e, 0xce, - 0xcf, 0xd5, 0x43, 0x33, 0xce, 0x89, 0xc3, 0x31, 0xaf, 0x32, 0x00, 0xc4, 0x09, 0x60, 0x8c, 0x52, - 0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, - 0x4b, 0x47, 0xb8, 0xa8, 0x00, 0x64, 0x7a, 0x31, 0xc8, 0x61, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c, - 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x8c, 0x0a, 0x80, 0x2a, 0xd1, 0x0b, 0x4f, 0xcd, 0xc9, 0xf1, 0xce, - 0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0x29, 0x4d, 0x62, 0x03, 0xeb, 0x35, 0x06, 0x04, 0x00, 0x00, 0xff, - 0xff, 0x13, 0xf8, 0xe8, 0x42, 0xdd, 0x00, 0x00, 0x00, + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes, + DependencyIndexes: file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs, + }.Build() + File_github_com_golang_protobuf_ptypes_any_any_proto = out.File + file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = nil + file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = nil + file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = nil } diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.proto b/vendor/github.com/golang/protobuf/ptypes/any/any.proto deleted file mode 100644 index 493294255..000000000 --- a/vendor/github.com/golang/protobuf/ptypes/any/any.proto +++ /dev/null @@ -1,154 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -syntax = "proto3"; - -package google.protobuf; - -option csharp_namespace = "Google.Protobuf.WellKnownTypes"; -option go_package = "github.com/golang/protobuf/ptypes/any"; -option java_package = "com.google.protobuf"; -option java_outer_classname = "AnyProto"; -option java_multiple_files = true; -option objc_class_prefix = "GPB"; - -// `Any` contains an arbitrary serialized protocol buffer message along with a -// URL that describes the type of the serialized message. -// -// Protobuf library provides support to pack/unpack Any values in the form -// of utility functions or additional generated methods of the Any type. -// -// Example 1: Pack and unpack a message in C++. -// -// Foo foo = ...; -// Any any; -// any.PackFrom(foo); -// ... -// if (any.UnpackTo(&foo)) { -// ... -// } -// -// Example 2: Pack and unpack a message in Java. -// -// Foo foo = ...; -// Any any = Any.pack(foo); -// ... -// if (any.is(Foo.class)) { -// foo = any.unpack(Foo.class); -// } -// -// Example 3: Pack and unpack a message in Python. -// -// foo = Foo(...) -// any = Any() -// any.Pack(foo) -// ... -// if any.Is(Foo.DESCRIPTOR): -// any.Unpack(foo) -// ... -// -// Example 4: Pack and unpack a message in Go -// -// foo := &pb.Foo{...} -// any, err := ptypes.MarshalAny(foo) -// ... -// foo := &pb.Foo{} -// if err := ptypes.UnmarshalAny(any, foo); err != nil { -// ... -// } -// -// The pack methods provided by protobuf library will by default use -// 'type.googleapis.com/full.type.name' as the type URL and the unpack -// methods only use the fully qualified type name after the last '/' -// in the type URL, for example "foo.bar.com/x/y.z" will yield type -// name "y.z". -// -// -// JSON -// ==== -// The JSON representation of an `Any` value uses the regular -// representation of the deserialized, embedded message, with an -// additional field `@type` which contains the type URL. Example: -// -// package google.profile; -// message Person { -// string first_name = 1; -// string last_name = 2; -// } -// -// { -// "@type": "type.googleapis.com/google.profile.Person", -// "firstName": , -// "lastName": -// } -// -// If the embedded message type is well-known and has a custom JSON -// representation, that representation will be embedded adding a field -// `value` which holds the custom JSON in addition to the `@type` -// field. Example (for message [google.protobuf.Duration][]): -// -// { -// "@type": "type.googleapis.com/google.protobuf.Duration", -// "value": "1.212s" -// } -// -message Any { - // A URL/resource name that uniquely identifies the type of the serialized - // protocol buffer message. The last segment of the URL's path must represent - // the fully qualified name of the type (as in - // `path/google.protobuf.Duration`). The name should be in a canonical form - // (e.g., leading "." is not accepted). - // - // In practice, teams usually precompile into the binary all types that they - // expect it to use in the context of Any. However, for URLs which use the - // scheme `http`, `https`, or no scheme, one can optionally set up a type - // server that maps type URLs to message definitions as follows: - // - // * If no scheme is provided, `https` is assumed. - // * An HTTP GET on the URL must yield a [google.protobuf.Type][] - // value in binary format, or produce an error. - // * Applications are allowed to cache lookup results based on the - // URL, or have them precompiled into a binary to avoid any - // lookup. Therefore, binary compatibility needs to be preserved - // on changes to types. 
(Use versioned type names to manage - // breaking changes.) - // - // Note: this functionality is not currently available in the official - // protobuf release, and it is not used for type URLs beginning with - // type.googleapis.com. - // - // Schemes other than `http`, `https` (or the empty scheme) might be - // used with implementation specific semantics. - // - string type_url = 1; - - // Must be a valid serialized protocol buffer of the above specified type. - bytes value = 2; -} diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go index c0d595da7..fb9edd5c6 100644 --- a/vendor/github.com/golang/protobuf/ptypes/doc.go +++ b/vendor/github.com/golang/protobuf/ptypes/doc.go @@ -1,35 +1,6 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. -/* -Package ptypes contains code for interacting with well-known types. -*/ +// Package ptypes provides functionality for interacting with well-known types. package ptypes diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go index 26d1ca2fb..6110ae8a4 100644 --- a/vendor/github.com/golang/protobuf/ptypes/duration.go +++ b/vendor/github.com/golang/protobuf/ptypes/duration.go @@ -1,102 +1,72 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. package ptypes -// This file implements conversions between google.protobuf.Duration -// and time.Duration. - import ( "errors" "fmt" "time" - durpb "github.com/golang/protobuf/ptypes/duration" + durationpb "github.com/golang/protobuf/ptypes/duration" ) +// Range of google.protobuf.Duration as specified in duration.proto. +// This is about 10,000 years in seconds. const ( - // Range of a durpb.Duration in seconds, as specified in - // google/protobuf/duration.proto. This is about 10,000 years in seconds. maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) minSeconds = -maxSeconds ) -// validateDuration determines whether the durpb.Duration is valid according to the -// definition in google/protobuf/duration.proto. A valid durpb.Duration -// may still be too large to fit into a time.Duration (the range of durpb.Duration -// is about 10,000 years, and the range of time.Duration is about 290). -func validateDuration(d *durpb.Duration) error { - if d == nil { - return errors.New("duration: nil Duration") - } - if d.Seconds < minSeconds || d.Seconds > maxSeconds { - return fmt.Errorf("duration: %v: seconds out of range", d) - } - if d.Nanos <= -1e9 || d.Nanos >= 1e9 { - return fmt.Errorf("duration: %v: nanos out of range", d) - } - // Seconds and Nanos must have the same sign, unless d.Nanos is zero. - if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) { - return fmt.Errorf("duration: %v: seconds and nanos have different signs", d) - } - return nil -} - -// Duration converts a durpb.Duration to a time.Duration. Duration -// returns an error if the durpb.Duration is invalid or is too large to be -// represented in a time.Duration. -func Duration(p *durpb.Duration) (time.Duration, error) { - if err := validateDuration(p); err != nil { +// Duration converts a durationpb.Duration to a time.Duration. +// Duration returns an error if dur is invalid or overflows a time.Duration. 
+func Duration(dur *durationpb.Duration) (time.Duration, error) { + if err := validateDuration(dur); err != nil { return 0, err } - d := time.Duration(p.Seconds) * time.Second - if int64(d/time.Second) != p.Seconds { - return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) + d := time.Duration(dur.Seconds) * time.Second + if int64(d/time.Second) != dur.Seconds { + return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur) } - if p.Nanos != 0 { - d += time.Duration(p.Nanos) * time.Nanosecond - if (d < 0) != (p.Nanos < 0) { - return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) + if dur.Nanos != 0 { + d += time.Duration(dur.Nanos) * time.Nanosecond + if (d < 0) != (dur.Nanos < 0) { + return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur) } } return d, nil } -// DurationProto converts a time.Duration to a durpb.Duration. -func DurationProto(d time.Duration) *durpb.Duration { +// DurationProto converts a time.Duration to a durationpb.Duration. +func DurationProto(d time.Duration) *durationpb.Duration { nanos := d.Nanoseconds() secs := nanos / 1e9 nanos -= secs * 1e9 - return &durpb.Duration{ - Seconds: secs, + return &durationpb.Duration{ + Seconds: int64(secs), Nanos: int32(nanos), } } + +// validateDuration determines whether the durationpb.Duration is valid +// according to the definition in google/protobuf/duration.proto. +// A valid durpb.Duration may still be too large to fit into a time.Duration +// Note that the range of durationpb.Duration is about 10,000 years, +// while the range of time.Duration is about 290 years. +func validateDuration(dur *durationpb.Duration) error { + if dur == nil { + return errors.New("duration: nil Duration") + } + if dur.Seconds < minSeconds || dur.Seconds > maxSeconds { + return fmt.Errorf("duration: %v: seconds out of range", dur) + } + if dur.Nanos <= -1e9 || dur.Nanos >= 1e9 { + return fmt.Errorf("duration: %v: nanos out of range", dur) + } + // Seconds and Nanos must have the same sign, unless d.Nanos is zero. + if (dur.Seconds < 0 && dur.Nanos > 0) || (dur.Seconds > 0 && dur.Nanos < 0) { + return fmt.Errorf("duration: %v: seconds and nanos have different signs", dur) + } + return nil +} diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go index 0d681ee21..d0079ee3e 100644 --- a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go +++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go @@ -1,161 +1,63 @@ // Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/protobuf/duration.proto +// source: github.com/golang/protobuf/ptypes/duration/duration.proto package duration import ( - fmt "fmt" - proto "github.com/golang/protobuf/proto" - math "math" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + reflect "reflect" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf +// Symbols defined in public import of google/protobuf/duration.proto. -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +type Duration = durationpb.Duration -// A Duration represents a signed, fixed-length span of time represented -// as a count of seconds and fractions of seconds at nanosecond -// resolution. It is independent of any calendar and concepts like "day" -// or "month". It is related to Timestamp in that the difference between -// two Timestamp values is a Duration and it can be added or subtracted -// from a Timestamp. Range is approximately +-10,000 years. -// -// # Examples -// -// Example 1: Compute Duration from two Timestamps in pseudo code. -// -// Timestamp start = ...; -// Timestamp end = ...; -// Duration duration = ...; -// -// duration.seconds = end.seconds - start.seconds; -// duration.nanos = end.nanos - start.nanos; -// -// if (duration.seconds < 0 && duration.nanos > 0) { -// duration.seconds += 1; -// duration.nanos -= 1000000000; -// } else if (durations.seconds > 0 && duration.nanos < 0) { -// duration.seconds -= 1; -// duration.nanos += 1000000000; -// } -// -// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. -// -// Timestamp start = ...; -// Duration duration = ...; -// Timestamp end = ...; -// -// end.seconds = start.seconds + duration.seconds; -// end.nanos = start.nanos + duration.nanos; -// -// if (end.nanos < 0) { -// end.seconds -= 1; -// end.nanos += 1000000000; -// } else if (end.nanos >= 1000000000) { -// end.seconds += 1; -// end.nanos -= 1000000000; -// } -// -// Example 3: Compute Duration from datetime.timedelta in Python. -// -// td = datetime.timedelta(days=3, minutes=10) -// duration = Duration() -// duration.FromTimedelta(td) -// -// # JSON Mapping -// -// In JSON format, the Duration type is encoded as a string rather than an -// object, where the string ends in the suffix "s" (indicating seconds) and -// is preceded by the number of seconds, with nanoseconds expressed as -// fractional seconds. For example, 3 seconds with 0 nanoseconds should be -// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should -// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 -// microsecond should be expressed in JSON format as "3.000001s". -// -// -type Duration struct { - // Signed seconds of the span of time. Must be from -315,576,000,000 - // to +315,576,000,000 inclusive. Note: these bounds are computed from: - // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years - Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` - // Signed fractions of a second at nanosecond resolution of the span - // of time. Durations less than one second are represented with a 0 - // `seconds` field and a positive or negative `nanos` field. For durations - // of one second or more, a non-zero value for the `nanos` field must be - // of the same sign as the `seconds` field. Must be from -999,999,999 - // to +999,999,999 inclusive. 
- Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} +var File_github_com_golang_protobuf_ptypes_duration_duration_proto protoreflect.FileDescriptor -func (m *Duration) Reset() { *m = Duration{} } -func (m *Duration) String() string { return proto.CompactTextString(m) } -func (*Duration) ProtoMessage() {} -func (*Duration) Descriptor() ([]byte, []int) { - return fileDescriptor_23597b2ebd7ac6c5, []int{0} +var file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = []byte{ + 0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x64, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x35, 0x5a, 0x33, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, + 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3b, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } -func (*Duration) XXX_WellKnownType() string { return "Duration" } - -func (m *Duration) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Duration.Unmarshal(m, b) -} -func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Duration.Marshal(b, m, deterministic) -} -func (m *Duration) XXX_Merge(src proto.Message) { - xxx_messageInfo_Duration.Merge(m, src) -} -func (m *Duration) XXX_Size() int { - return xxx_messageInfo_Duration.Size(m) -} -func (m *Duration) XXX_DiscardUnknown() { - xxx_messageInfo_Duration.DiscardUnknown(m) +var file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = []interface{}{} +var file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name } -var xxx_messageInfo_Duration proto.InternalMessageInfo - -func (m *Duration) GetSeconds() int64 { - if m != nil { - return m.Seconds +func init() { file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() } +func file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() { + if File_github_com_golang_protobuf_ptypes_duration_duration_proto != nil { + return } - return 0 -} - -func (m *Duration) GetNanos() int32 { - if m != nil { - return m.Nanos - } - return 0 -} - -func init() { - proto.RegisterType((*Duration)(nil), "google.protobuf.Duration") -} - -func init() { proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_23597b2ebd7ac6c5) } - -var fileDescriptor_23597b2ebd7ac6c5 = []byte{ - // 190 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, - 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 
0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a, - 0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0x56, - 0x5c, 0x1c, 0x2e, 0x50, 0x25, 0x42, 0x12, 0x5c, 0xec, 0xc5, 0xa9, 0xc9, 0xf9, 0x79, 0x29, 0xc5, - 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x30, 0xae, 0x90, 0x08, 0x17, 0x6b, 0x5e, 0x62, 0x5e, - 0x7e, 0xb1, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x6b, 0x10, 0x84, 0xe3, 0x54, 0xc3, 0x25, 0x9c, 0x9c, - 0x9f, 0xab, 0x87, 0x66, 0xa4, 0x13, 0x2f, 0xcc, 0xc0, 0x00, 0x90, 0x48, 0x00, 0x63, 0x94, 0x56, - 0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x7a, 0x7e, 0x4e, 0x62, 0x5e, - 0x3a, 0xc2, 0x7d, 0x05, 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x70, 0x67, 0xfe, 0x60, 0x64, 0x5c, 0xc4, - 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e, 0x00, 0x54, 0xa9, 0x5e, 0x78, - 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x4b, 0x12, 0x1b, 0xd8, 0x0c, 0x63, - 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x84, 0x30, 0xff, 0xf3, 0x00, 0x00, 0x00, + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes, + DependencyIndexes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs, + }.Build() + File_github_com_golang_protobuf_ptypes_duration_duration_proto = out.File + file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = nil + file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = nil + file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = nil } diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto deleted file mode 100644 index 975fce41a..000000000 --- a/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto +++ /dev/null @@ -1,117 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto3"; - -package google.protobuf; - -option csharp_namespace = "Google.Protobuf.WellKnownTypes"; -option cc_enable_arenas = true; -option go_package = "github.com/golang/protobuf/ptypes/duration"; -option java_package = "com.google.protobuf"; -option java_outer_classname = "DurationProto"; -option java_multiple_files = true; -option objc_class_prefix = "GPB"; - -// A Duration represents a signed, fixed-length span of time represented -// as a count of seconds and fractions of seconds at nanosecond -// resolution. It is independent of any calendar and concepts like "day" -// or "month". It is related to Timestamp in that the difference between -// two Timestamp values is a Duration and it can be added or subtracted -// from a Timestamp. Range is approximately +-10,000 years. -// -// # Examples -// -// Example 1: Compute Duration from two Timestamps in pseudo code. -// -// Timestamp start = ...; -// Timestamp end = ...; -// Duration duration = ...; -// -// duration.seconds = end.seconds - start.seconds; -// duration.nanos = end.nanos - start.nanos; -// -// if (duration.seconds < 0 && duration.nanos > 0) { -// duration.seconds += 1; -// duration.nanos -= 1000000000; -// } else if (durations.seconds > 0 && duration.nanos < 0) { -// duration.seconds -= 1; -// duration.nanos += 1000000000; -// } -// -// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. -// -// Timestamp start = ...; -// Duration duration = ...; -// Timestamp end = ...; -// -// end.seconds = start.seconds + duration.seconds; -// end.nanos = start.nanos + duration.nanos; -// -// if (end.nanos < 0) { -// end.seconds -= 1; -// end.nanos += 1000000000; -// } else if (end.nanos >= 1000000000) { -// end.seconds += 1; -// end.nanos -= 1000000000; -// } -// -// Example 3: Compute Duration from datetime.timedelta in Python. -// -// td = datetime.timedelta(days=3, minutes=10) -// duration = Duration() -// duration.FromTimedelta(td) -// -// # JSON Mapping -// -// In JSON format, the Duration type is encoded as a string rather than an -// object, where the string ends in the suffix "s" (indicating seconds) and -// is preceded by the number of seconds, with nanoseconds expressed as -// fractional seconds. For example, 3 seconds with 0 nanoseconds should be -// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should -// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 -// microsecond should be expressed in JSON format as "3.000001s". -// -// -message Duration { - - // Signed seconds of the span of time. Must be from -315,576,000,000 - // to +315,576,000,000 inclusive. Note: these bounds are computed from: - // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years - int64 seconds = 1; - - // Signed fractions of a second at nanosecond resolution of the span - // of time. Durations less than one second are represented with a 0 - // `seconds` field and a positive or negative `nanos` field. 
For durations - // of one second or more, a non-zero value for the `nanos` field must be - // of the same sign as the `seconds` field. Must be from -999,999,999 - // to +999,999,999 inclusive. - int32 nanos = 2; -} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go index 8da0df01a..026d0d491 100644 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp.go +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp.go @@ -1,46 +1,18 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. package ptypes -// This file implements operations on google.protobuf.Timestamp. - import ( "errors" "fmt" "time" - tspb "github.com/golang/protobuf/ptypes/timestamp" + timestamppb "github.com/golang/protobuf/ptypes/timestamp" ) +// Range of google.protobuf.Duration as specified in timestamp.proto. const ( // Seconds field of the earliest valid Timestamp. // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). @@ -50,44 +22,18 @@ const ( maxValidSeconds = 253402300800 ) -// validateTimestamp determines whether a Timestamp is valid. -// A valid timestamp represents a time in the range -// [0001-01-01, 10000-01-01) and has a Nanos field -// in the range [0, 1e9). -// -// If the Timestamp is valid, validateTimestamp returns nil. -// Otherwise, it returns an error that describes -// the problem. -// -// Every valid Timestamp can be represented by a time.Time, but the converse is not true. 
-func validateTimestamp(ts *tspb.Timestamp) error { - if ts == nil { - return errors.New("timestamp: nil Timestamp") - } - if ts.Seconds < minValidSeconds { - return fmt.Errorf("timestamp: %v before 0001-01-01", ts) - } - if ts.Seconds >= maxValidSeconds { - return fmt.Errorf("timestamp: %v after 10000-01-01", ts) - } - if ts.Nanos < 0 || ts.Nanos >= 1e9 { - return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts) - } - return nil -} - -// Timestamp converts a google.protobuf.Timestamp proto to a time.Time. +// Timestamp converts a timestamppb.Timestamp to a time.Time. // It returns an error if the argument is invalid. // -// Unlike most Go functions, if Timestamp returns an error, the first return value -// is not the zero time.Time. Instead, it is the value obtained from the +// Unlike most Go functions, if Timestamp returns an error, the first return +// value is not the zero time.Time. Instead, it is the value obtained from the // time.Unix function when passed the contents of the Timestamp, in the UTC // locale. This may or may not be a meaningful time; many invalid Timestamps // do map to valid time.Times. // // A nil Timestamp returns an error. The first return value in that case is // undefined. -func Timestamp(ts *tspb.Timestamp) (time.Time, error) { +func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) { // Don't return the zero value on error, because corresponds to a valid // timestamp. Instead return whatever time.Unix gives us. var t time.Time @@ -100,7 +46,7 @@ func Timestamp(ts *tspb.Timestamp) (time.Time, error) { } // TimestampNow returns a google.protobuf.Timestamp for the current time. -func TimestampNow() *tspb.Timestamp { +func TimestampNow() *timestamppb.Timestamp { ts, err := TimestampProto(time.Now()) if err != nil { panic("ptypes: time.Now() out of Timestamp range") @@ -110,8 +56,8 @@ func TimestampNow() *tspb.Timestamp { // TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. // It returns an error if the resulting Timestamp is invalid. -func TimestampProto(t time.Time) (*tspb.Timestamp, error) { - ts := &tspb.Timestamp{ +func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) { + ts := &timestamppb.Timestamp{ Seconds: t.Unix(), Nanos: int32(t.Nanosecond()), } @@ -121,12 +67,37 @@ func TimestampProto(t time.Time) (*tspb.Timestamp, error) { return ts, nil } -// TimestampString returns the RFC 3339 string for valid Timestamps. For invalid -// Timestamps, it returns an error message in parentheses. -func TimestampString(ts *tspb.Timestamp) string { +// TimestampString returns the RFC 3339 string for valid Timestamps. +// For invalid Timestamps, it returns an error message in parentheses. +func TimestampString(ts *timestamppb.Timestamp) string { t, err := Timestamp(ts) if err != nil { return fmt.Sprintf("(%v)", err) } return t.Format(time.RFC3339Nano) } + +// validateTimestamp determines whether a Timestamp is valid. +// A valid timestamp represents a time in the range [0001-01-01, 10000-01-01) +// and has a Nanos field in the range [0, 1e9). +// +// If the Timestamp is valid, validateTimestamp returns nil. +// Otherwise, it returns an error that describes the problem. +// +// Every valid Timestamp can be represented by a time.Time, +// but the converse is not true.
+func validateTimestamp(ts *timestamppb.Timestamp) error { + if ts == nil { + return errors.New("timestamp: nil Timestamp") + } + if ts.Seconds < minValidSeconds { + return fmt.Errorf("timestamp: %v before 0001-01-01", ts) + } + if ts.Seconds >= maxValidSeconds { + return fmt.Errorf("timestamp: %v after 10000-01-01", ts) + } + if ts.Nanos < 0 || ts.Nanos >= 1e9 { + return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts) + } + return nil +} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go index 31cd846de..a76f80760 100644 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go @@ -1,179 +1,64 @@ // Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/protobuf/timestamp.proto +// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto package timestamp import ( - fmt "fmt" - proto "github.com/golang/protobuf/proto" - math "math" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf +// Symbols defined in public import of google/protobuf/timestamp.proto. -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +type Timestamp = timestamppb.Timestamp -// A Timestamp represents a point in time independent of any time zone -// or calendar, represented as seconds and fractions of seconds at -// nanosecond resolution in UTC Epoch time. It is encoded using the -// Proleptic Gregorian Calendar which extends the Gregorian calendar -// backwards to year one. It is encoded assuming all minutes are 60 -// seconds long, i.e. leap seconds are "smeared" so that no leap second -// table is needed for interpretation. Range is from -// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. -// By restricting to that range, we ensure that we can convert to -// and from RFC 3339 date strings. -// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). -// -// # Examples -// -// Example 1: Compute Timestamp from POSIX `time()`. -// -// Timestamp timestamp; -// timestamp.set_seconds(time(NULL)); -// timestamp.set_nanos(0); -// -// Example 2: Compute Timestamp from POSIX `gettimeofday()`. -// -// struct timeval tv; -// gettimeofday(&tv, NULL); -// -// Timestamp timestamp; -// timestamp.set_seconds(tv.tv_sec); -// timestamp.set_nanos(tv.tv_usec * 1000); -// -// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. -// -// FILETIME ft; -// GetSystemTimeAsFileTime(&ft); -// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; -// -// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z -// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. 
-// Timestamp timestamp; -// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); -// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); -// -// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. -// -// long millis = System.currentTimeMillis(); -// -// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) -// .setNanos((int) ((millis % 1000) * 1000000)).build(); -// -// -// Example 5: Compute Timestamp from current time in Python. -// -// timestamp = Timestamp() -// timestamp.GetCurrentTime() -// -// # JSON Mapping -// -// In JSON format, the Timestamp type is encoded as a string in the -// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the -// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" -// where {year} is always expressed using four digits while {month}, {day}, -// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional -// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), -// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone -// is required. A proto3 JSON serializer should always use UTC (as indicated by -// "Z") when printing the Timestamp type and a proto3 JSON parser should be -// able to accept both UTC and other timezones (as indicated by an offset). -// -// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past -// 01:30 UTC on January 15, 2017. -// -// In JavaScript, one can convert a Date object to this format using the -// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString] -// method. In Python, a standard `datetime.datetime` object can be converted -// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) -// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one -// can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( -// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime-- -// ) to obtain a formatter capable of generating timestamps in this format. -// -// -type Timestamp struct { - // Represents seconds of UTC time since Unix epoch - // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to - // 9999-12-31T23:59:59Z inclusive. - Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` - // Non-negative fractions of a second at nanosecond resolution. Negative - // second values with fractions must still have non-negative nanos values - // that count forward in time. Must be from 0 to 999,999,999 - // inclusive. 
- Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} +var File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto protoreflect.FileDescriptor -func (m *Timestamp) Reset() { *m = Timestamp{} } -func (m *Timestamp) String() string { return proto.CompactTextString(m) } -func (*Timestamp) ProtoMessage() {} -func (*Timestamp) Descriptor() ([]byte, []int) { - return fileDescriptor_292007bbfe81227e, []int{0} +var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = []byte{ + 0x0a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2f, 0x74, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x37, + 0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x3b, 0x74, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, } -func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" } - -func (m *Timestamp) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Timestamp.Unmarshal(m, b) -} -func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic) -} -func (m *Timestamp) XXX_Merge(src proto.Message) { - xxx_messageInfo_Timestamp.Merge(m, src) -} -func (m *Timestamp) XXX_Size() int { - return xxx_messageInfo_Timestamp.Size(m) -} -func (m *Timestamp) XXX_DiscardUnknown() { - xxx_messageInfo_Timestamp.DiscardUnknown(m) +var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = []interface{}{} +var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name } -var xxx_messageInfo_Timestamp proto.InternalMessageInfo - -func (m *Timestamp) GetSeconds() int64 { - if m != nil { - return m.Seconds +func init() { file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() } +func file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() { + if File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto != nil { + return } - return 0 -} - -func (m *Timestamp) GetNanos() int32 { - if m != nil { - return m.Nanos - } - return 0 -} - -func init() { - proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp") -} - -func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_292007bbfe81227e) } - -var fileDescriptor_292007bbfe81227e = []byte{ - // 191 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 
0x4f, 0xcf, 0xcf, 0x4f, - 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d, - 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0xd0, 0x03, 0x0b, 0x09, 0xf1, 0x43, 0x14, 0xe8, 0xc1, 0x14, 0x28, - 0x59, 0x73, 0x71, 0x86, 0xc0, 0xd4, 0x08, 0x49, 0x70, 0xb1, 0x17, 0xa7, 0x26, 0xe7, 0xe7, 0xa5, - 0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0xc1, 0xb8, 0x42, 0x22, 0x5c, 0xac, 0x79, 0x89, - 0x79, 0xf9, 0xc5, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xac, 0x41, 0x10, 0x8e, 0x53, 0x1d, 0x97, 0x70, - 0x72, 0x7e, 0xae, 0x1e, 0x9a, 0x99, 0x4e, 0x7c, 0x70, 0x13, 0x03, 0x40, 0x42, 0x01, 0x8c, 0x51, - 0xda, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0xe9, 0xf9, 0x39, 0x89, - 0x79, 0xe9, 0x08, 0x27, 0x16, 0x94, 0x54, 0x16, 0xa4, 0x16, 0x23, 0x5c, 0xfa, 0x83, 0x91, 0x71, - 0x11, 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xc9, 0x01, 0x50, 0xb5, 0x7a, - 0xe1, 0xa9, 0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x3d, 0x49, 0x6c, 0x60, 0x43, - 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbc, 0x77, 0x4a, 0x07, 0xf7, 0x00, 0x00, 0x00, + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes, + DependencyIndexes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs, + }.Build() + File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto = out.File + file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = nil + file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = nil + file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = nil } diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto deleted file mode 100644 index eafb3fa03..000000000 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto +++ /dev/null @@ -1,135 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto3"; - -package google.protobuf; - -option csharp_namespace = "Google.Protobuf.WellKnownTypes"; -option cc_enable_arenas = true; -option go_package = "github.com/golang/protobuf/ptypes/timestamp"; -option java_package = "com.google.protobuf"; -option java_outer_classname = "TimestampProto"; -option java_multiple_files = true; -option objc_class_prefix = "GPB"; - -// A Timestamp represents a point in time independent of any time zone -// or calendar, represented as seconds and fractions of seconds at -// nanosecond resolution in UTC Epoch time. It is encoded using the -// Proleptic Gregorian Calendar which extends the Gregorian calendar -// backwards to year one. It is encoded assuming all minutes are 60 -// seconds long, i.e. leap seconds are "smeared" so that no leap second -// table is needed for interpretation. Range is from -// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. -// By restricting to that range, we ensure that we can convert to -// and from RFC 3339 date strings. -// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). -// -// # Examples -// -// Example 1: Compute Timestamp from POSIX `time()`. -// -// Timestamp timestamp; -// timestamp.set_seconds(time(NULL)); -// timestamp.set_nanos(0); -// -// Example 2: Compute Timestamp from POSIX `gettimeofday()`. -// -// struct timeval tv; -// gettimeofday(&tv, NULL); -// -// Timestamp timestamp; -// timestamp.set_seconds(tv.tv_sec); -// timestamp.set_nanos(tv.tv_usec * 1000); -// -// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. -// -// FILETIME ft; -// GetSystemTimeAsFileTime(&ft); -// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; -// -// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z -// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. -// Timestamp timestamp; -// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); -// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); -// -// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. -// -// long millis = System.currentTimeMillis(); -// -// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) -// .setNanos((int) ((millis % 1000) * 1000000)).build(); -// -// -// Example 5: Compute Timestamp from current time in Python. -// -// timestamp = Timestamp() -// timestamp.GetCurrentTime() -// -// # JSON Mapping -// -// In JSON format, the Timestamp type is encoded as a string in the -// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the -// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" -// where {year} is always expressed using four digits while {month}, {day}, -// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional -// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), -// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone -// is required. 
A proto3 JSON serializer should always use UTC (as indicated by -// "Z") when printing the Timestamp type and a proto3 JSON parser should be -// able to accept both UTC and other timezones (as indicated by an offset). -// -// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past -// 01:30 UTC on January 15, 2017. -// -// In JavaScript, one can convert a Date object to this format using the -// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString] -// method. In Python, a standard `datetime.datetime` object can be converted -// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) -// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one -// can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( -// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime-- -// ) to obtain a formatter capable of generating timestamps in this format. -// -// -message Timestamp { - - // Represents seconds of UTC time since Unix epoch - // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to - // 9999-12-31T23:59:59Z inclusive. - int64 seconds = 1; - - // Non-negative fractions of a second at nanosecond resolution. Negative - // second values with fractions must still have non-negative nanos values - // that count forward in time. Must be from 0 to 999,999,999 - // inclusive. - int32 nanos = 2; -} diff --git a/vendor/github.com/google/go-cmp/cmp/compare.go b/vendor/github.com/google/go-cmp/cmp/compare.go index 2133562b0..665618684 100644 --- a/vendor/github.com/google/go-cmp/cmp/compare.go +++ b/vendor/github.com/google/go-cmp/cmp/compare.go @@ -6,6 +6,10 @@ // // This package is intended to be a more powerful and safer alternative to // reflect.DeepEqual for comparing whether two values are semantically equal. +// It is intended to only be used in tests, as performance is not a goal and +// it may panic if it cannot compare the values. Its propensity towards +// panicking means that its unsuitable for production environments where a +// spurious panic may be fatal. // // The primary features of cmp are: // @@ -22,8 +26,8 @@ // equality is determined by recursively comparing the primitive kinds on both // values, much like reflect.DeepEqual. Unlike reflect.DeepEqual, unexported // fields are not compared by default; they result in panics unless suppressed -// by using an Ignore option (see cmpopts.IgnoreUnexported) or explicitly compared -// using the AllowUnexported option. +// by using an Ignore option (see cmpopts.IgnoreUnexported) or explicitly +// compared using the Exporter option. package cmp import ( @@ -62,8 +66,8 @@ import ( // // Structs are equal if recursively calling Equal on all fields report equal. // If a struct contains unexported fields, Equal panics unless an Ignore option -// (e.g., cmpopts.IgnoreUnexported) ignores that field or the AllowUnexported -// option explicitly permits comparing the unexported field. +// (e.g., cmpopts.IgnoreUnexported) ignores that field or the Exporter option +// explicitly permits comparing the unexported field. // // Slices are equal if they are both nil or both non-nil, where recursively // calling Equal on all non-ignored slice or array elements report equal. 
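The compare.go documentation above now points callers at the Exporter option (rather than AllowUnexported) for opting in to unexported-field comparison. A hedged sketch of that usage with the vendored go-cmp; the config type is a made-up example, not something from this repository:

package main

import (
	"fmt"
	"reflect"

	"github.com/google/go-cmp/cmp"
)

type config struct {
	name    string
	retries int // unexported: Equal panics on such fields unless an option permits or ignores them
}

func main() {
	x := config{name: "etcd", retries: 3}
	y := config{name: "etcd", retries: 5}

	// Exporter opts specific struct types in to unexported-field comparison.
	allowConfig := cmp.Exporter(func(t reflect.Type) bool {
		return t == reflect.TypeOf(config{})
	})
	fmt.Println(cmp.Diff(x, y, allowConfig))
}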
@@ -80,7 +84,58 @@ import ( // Pointers and interfaces are equal if they are both nil or both non-nil, // where they have the same underlying concrete type and recursively // calling Equal on the underlying values reports equal. +// +// Before recursing into a pointer, slice element, or map, the current path +// is checked to detect whether the address has already been visited. +// If there is a cycle, then the pointed at values are considered equal +// only if both addresses were previously visited in the same path step. func Equal(x, y interface{}, opts ...Option) bool { + s := newState(opts) + s.compareAny(rootStep(x, y)) + return s.result.Equal() +} + +// Diff returns a human-readable report of the differences between two values: +// y - x. It returns an empty string if and only if Equal returns true for the +// same input values and options. +// +// The output is displayed as a literal in pseudo-Go syntax. +// At the start of each line, a "-" prefix indicates an element removed from y, +// a "+" prefix to indicates an element added to y, and the lack of a prefix +// indicates an element common to both x and y. If possible, the output +// uses fmt.Stringer.String or error.Error methods to produce more humanly +// readable outputs. In such cases, the string is prefixed with either an +// 's' or 'e' character, respectively, to indicate that the method was called. +// +// Do not depend on this output being stable. If you need the ability to +// programmatically interpret the difference, consider using a custom Reporter. +func Diff(x, y interface{}, opts ...Option) string { + s := newState(opts) + + // Optimization: If there are no other reporters, we can optimize for the + // common case where the result is equal (and thus no reported difference). + // This avoids the expensive construction of a difference tree. + if len(s.reporters) == 0 { + s.compareAny(rootStep(x, y)) + if s.result.Equal() { + return "" + } + s.result = diff.Result{} // Reset results + } + + r := new(defaultReporter) + s.reporters = append(s.reporters, reporter{r}) + s.compareAny(rootStep(x, y)) + d := r.String() + if (d == "") != s.result.Equal() { + panic("inconsistent difference and equality results") + } + return d +} + +// rootStep constructs the first path step. If x and y have differing types, +// then they are stored within an empty interface type. +func rootStep(x, y interface{}) PathStep { vx := reflect.ValueOf(x) vy := reflect.ValueOf(y) @@ -103,33 +158,7 @@ func Equal(x, y interface{}, opts ...Option) bool { t = vx.Type() } - s := newState(opts) - s.compareAny(&pathStep{t, vx, vy}) - return s.result.Equal() -} - -// Diff returns a human-readable report of the differences between two values. -// It returns an empty string if and only if Equal returns true for the same -// input values and options. -// -// The output is displayed as a literal in pseudo-Go syntax. -// At the start of each line, a "-" prefix indicates an element removed from x, -// a "+" prefix to indicates an element added to y, and the lack of a prefix -// indicates an element common to both x and y. If possible, the output -// uses fmt.Stringer.String or error.Error methods to produce more humanly -// readable outputs. In such cases, the string is prefixed with either an -// 's' or 'e' character, respectively, to indicate that the method was called. -// -// Do not depend on this output being stable. If you need the ability to -// programmatically interpret the difference, consider using a custom Reporter. 
-func Diff(x, y interface{}, opts ...Option) string { - r := new(defaultReporter) - eq := Equal(x, y, Options(opts), Reporter(r)) - d := r.String() - if (d == "") != eq { - panic("inconsistent difference and equality results") - } - return d + return &pathStep{t, vx, vy} } type state struct { @@ -137,6 +166,7 @@ type state struct { // Calling statelessCompare must not result in observable changes to these. result diff.Result // The current result of comparison curPath Path // The current path in the value tree + curPtrs pointerPath // The current set of visited pointers reporters []reporter // Optional reporters // recChecker checks for infinite cycles applying the same set of @@ -148,13 +178,14 @@ type state struct { dynChecker dynChecker // These fields, once set by processOption, will not change. - exporters map[reflect.Type]bool // Set of structs with unexported field visibility - opts Options // List of all fundamental and filter options + exporters []exporter // List of exporters for structs with unexported fields + opts Options // List of all fundamental and filter options } func newState(opts []Option) *state { // Always ensure a validator option exists to validate the inputs. s := &state{opts: Options{validator{}}} + s.curPtrs.Init() s.processOption(Options(opts)) return s } @@ -174,13 +205,8 @@ func (s *state) processOption(opt Option) { panic(fmt.Sprintf("cannot use an unfiltered option: %v", opt)) } s.opts = append(s.opts, opt) - case visibleStructs: - if s.exporters == nil { - s.exporters = make(map[reflect.Type]bool) - } - for t := range opt { - s.exporters[t] = true - } + case exporter: + s.exporters = append(s.exporters, opt) case reporter: s.reporters = append(s.reporters, opt) default: @@ -192,9 +218,9 @@ func (s *state) processOption(opt Option) { // This function is stateless in that it does not alter the current result, // or output to any registered reporters. func (s *state) statelessCompare(step PathStep) diff.Result { - // We do not save and restore the curPath because all of the compareX - // methods should properly push and pop from the path. - // It is an implementation bug if the contents of curPath differs from + // We do not save and restore curPath and curPtrs because all of the + // compareX methods should properly push and pop from them. + // It is an implementation bug if the contents of the paths differ from // when calling this function to when returning from it. oldResult, oldReporters := s.result, s.reporters @@ -216,9 +242,17 @@ func (s *state) compareAny(step PathStep) { } s.recChecker.Check(s.curPath) - // Obtain the current type and values. + // Cycle-detection for slice elements (see NOTE in compareSlice). t := step.Type() vx, vy := step.Values() + if si, ok := step.(SliceIndex); ok && si.isSlice && vx.IsValid() && vy.IsValid() { + px, py := vx.Addr(), vy.Addr() + if eq, visited := s.curPtrs.Push(px, py); visited { + s.report(eq, reportByCycle) + return + } + defer s.curPtrs.Pop(px, py) + } // Rule 1: Check whether an option applies on this node in the value tree. if s.tryOptions(t, vx, vy) { @@ -342,7 +376,7 @@ func detectRaces(c chan<- reflect.Value, f reflect.Value, vs ...reflect.Value) { // assuming that T is assignable to R. // Otherwise, it returns the input value as is. func sanitizeValue(v reflect.Value, t reflect.Type) reflect.Value { - // TODO(dsnet): Workaround for reflect bug (https://golang.org/issue/22143). + // TODO(≥go1.10): Workaround for reflect bug (https://golang.org/issue/22143). 
if !flags.AtLeastGo110 { if v.Kind() == reflect.Interface && v.IsNil() && v.Type() != t { return reflect.New(t).Elem() @@ -352,8 +386,10 @@ func sanitizeValue(v reflect.Value, t reflect.Type) reflect.Value { } func (s *state) compareStruct(t reflect.Type, vx, vy reflect.Value) { + var addr bool var vax, vay reflect.Value // Addressable versions of vx and vy + var mayForce, mayForceInit bool step := StructField{&structField{}} for i := 0; i < t.NumField(); i++ { step.typ = t.Field(i).Type @@ -372,10 +408,18 @@ func (s *state) compareStruct(t reflect.Type, vx, vy reflect.Value) { // For retrieveUnexportedField to work, the parent struct must // be addressable. Create a new copy of the values if // necessary to make them addressable. + addr = vx.CanAddr() || vy.CanAddr() vax = makeAddressable(vx) vay = makeAddressable(vy) } - step.mayForce = s.exporters[t] + if !mayForceInit { + for _, xf := range s.exporters { + mayForce = mayForce || xf(t) + } + mayForceInit = true + } + step.mayForce = mayForce + step.paddr = addr step.pvx = vax step.pvy = vay step.field = t.Field(i) @@ -391,9 +435,21 @@ func (s *state) compareSlice(t reflect.Type, vx, vy reflect.Value) { return } - // TODO: Support cyclic data structures. + // NOTE: It is incorrect to call curPtrs.Push on the slice header pointer + // since slices represents a list of pointers, rather than a single pointer. + // The pointer checking logic must be handled on a per-element basis + // in compareAny. + // + // A slice header (see reflect.SliceHeader) in Go is a tuple of a starting + // pointer P, a length N, and a capacity C. Supposing each slice element has + // a memory size of M, then the slice is equivalent to the list of pointers: + // [P+i*M for i in range(N)] + // + // For example, v[:0] and v[:1] are slices with the same starting pointer, + // but they are clearly different values. Using the slice pointer alone + // violates the assumption that equal pointers implies equal values. - step := SliceIndex{&sliceIndex{pathStep: pathStep{typ: t.Elem()}}} + step := SliceIndex{&sliceIndex{pathStep: pathStep{typ: t.Elem()}, isSlice: isSlice}} withIndexes := func(ix, iy int) SliceIndex { if ix >= 0 { step.vx, step.xkey = vx.Index(ix), ix @@ -470,7 +526,12 @@ func (s *state) compareMap(t reflect.Type, vx, vy reflect.Value) { return } - // TODO: Support cyclic data structures. + // Cycle-detection for maps. + if eq, visited := s.curPtrs.Push(vx, vy); visited { + s.report(eq, reportByCycle) + return + } + defer s.curPtrs.Pop(vx, vy) // We combine and sort the two map keys so that we can perform the // comparisons in a deterministic order. @@ -507,7 +568,12 @@ func (s *state) comparePtr(t reflect.Type, vx, vy reflect.Value) { return } - // TODO: Support cyclic data structures. + // Cycle-detection for pointers. 
+ if eq, visited := s.curPtrs.Push(vx, vy); visited { + s.report(eq, reportByCycle) + return + } + defer s.curPtrs.Pop(vx, vy) vx, vy = vx.Elem(), vy.Elem() s.compareAny(Indirect{&indirect{pathStep{t.Elem(), vx, vy}}}) diff --git a/vendor/github.com/google/go-cmp/cmp/export_panic.go b/vendor/github.com/google/go-cmp/cmp/export_panic.go index abc3a1c3e..dfa5d2137 100644 --- a/vendor/github.com/google/go-cmp/cmp/export_panic.go +++ b/vendor/github.com/google/go-cmp/cmp/export_panic.go @@ -8,8 +8,8 @@ package cmp import "reflect" -const supportAllowUnexported = false +const supportExporters = false -func retrieveUnexportedField(reflect.Value, reflect.StructField) reflect.Value { - panic("retrieveUnexportedField is not implemented") +func retrieveUnexportedField(reflect.Value, reflect.StructField, bool) reflect.Value { + panic("no support for forcibly accessing unexported fields") } diff --git a/vendor/github.com/google/go-cmp/cmp/export_unsafe.go b/vendor/github.com/google/go-cmp/cmp/export_unsafe.go index 59d4ee91b..351f1a34b 100644 --- a/vendor/github.com/google/go-cmp/cmp/export_unsafe.go +++ b/vendor/github.com/google/go-cmp/cmp/export_unsafe.go @@ -11,13 +11,25 @@ import ( "unsafe" ) -const supportAllowUnexported = true +const supportExporters = true // retrieveUnexportedField uses unsafe to forcibly retrieve any field from // a struct such that the value has read-write permissions. // // The parent struct, v, must be addressable, while f must be a StructField -// describing the field to retrieve. -func retrieveUnexportedField(v reflect.Value, f reflect.StructField) reflect.Value { - return reflect.NewAt(f.Type, unsafe.Pointer(v.UnsafeAddr()+f.Offset)).Elem() +// describing the field to retrieve. If addr is false, +// then the returned value will be shallowed copied to be non-addressable. +func retrieveUnexportedField(v reflect.Value, f reflect.StructField, addr bool) reflect.Value { + ve := reflect.NewAt(f.Type, unsafe.Pointer(uintptr(unsafe.Pointer(v.UnsafeAddr()))+f.Offset)).Elem() + if !addr { + // A field is addressable if and only if the struct is addressable. + // If the original parent value was not addressable, shallow copy the + // value to make it non-addressable to avoid leaking an implementation + // detail of how forcibly exporting a field works. + if ve.Kind() == reflect.Interface && ve.IsNil() { + return reflect.Zero(f.Type) + } + return reflect.ValueOf(ve.Interface()).Convert(f.Type) + } + return ve } diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go index 3d2e42662..730e223ee 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go @@ -12,6 +12,13 @@ // is more important than obtaining a minimal Levenshtein distance. package diff +import ( + "math/rand" + "time" + + "github.com/google/go-cmp/cmp/internal/flags" +) + // EditType represents a single operation within an edit-script. type EditType uint8 @@ -112,6 +119,8 @@ func (r Result) Similar() bool { return r.NumSame+1 >= r.NumDiff } +var randInt = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) + // Difference reports whether two lists of lengths nx and ny are equal // given the definition of equality provided as f. // @@ -159,6 +168,17 @@ func Difference(nx, ny int, f EqualFunc) (es EditScript) { // A vertical edge is equivalent to inserting a symbol from list Y. // A diagonal edge is equivalent to a matching symbol between both X and Y. 
+ // To ensure flexibility in changing the algorithm in the future, + // introduce some degree of deliberate instability. + // This is achieved by fiddling the zigzag iterator to start searching + // the graph starting from the bottom-right versus than the top-left. + // The result may differ depending on the starting search location, + // but still produces a valid edit script. + zigzagInit := randInt // either 0 or 1 + if flags.Deterministic { + zigzagInit = 0 + } + // Invariants: // • 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx // • 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny @@ -209,7 +229,7 @@ func Difference(nx, ny int, f EqualFunc) (es EditScript) { if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 { break } - for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ { + for stop1, stop2, i := false, false, zigzagInit; !(stop1 && stop2) && searchBudget > 0; i++ { // Search in a diagonal pattern for a match. z := zigzag(i) p := point{fwdFrontier.X + z, fwdFrontier.Y - z} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/name.go b/vendor/github.com/google/go-cmp/cmp/internal/value/name.go new file mode 100644 index 000000000..8228e7d51 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/name.go @@ -0,0 +1,157 @@ +// Copyright 2020, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package value + +import ( + "reflect" + "strconv" +) + +// TypeString is nearly identical to reflect.Type.String, +// but has an additional option to specify that full type names be used. +func TypeString(t reflect.Type, qualified bool) string { + return string(appendTypeName(nil, t, qualified, false)) +} + +func appendTypeName(b []byte, t reflect.Type, qualified, elideFunc bool) []byte { + // BUG: Go reflection provides no way to disambiguate two named types + // of the same name and within the same package, + // but declared within the namespace of different functions. + + // Named type. + if t.Name() != "" { + if qualified && t.PkgPath() != "" { + b = append(b, '"') + b = append(b, t.PkgPath()...) + b = append(b, '"') + b = append(b, '.') + b = append(b, t.Name()...) + } else { + b = append(b, t.String()...) + } + return b + } + + // Unnamed type. + switch k := t.Kind(); k { + case reflect.Bool, reflect.String, reflect.UnsafePointer, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, + reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128: + b = append(b, k.String()...) + case reflect.Chan: + if t.ChanDir() == reflect.RecvDir { + b = append(b, "<-"...) + } + b = append(b, "chan"...) + if t.ChanDir() == reflect.SendDir { + b = append(b, "<-"...) + } + b = append(b, ' ') + b = appendTypeName(b, t.Elem(), qualified, false) + case reflect.Func: + if !elideFunc { + b = append(b, "func"...) + } + b = append(b, '(') + for i := 0; i < t.NumIn(); i++ { + if i > 0 { + b = append(b, ", "...) + } + if i == t.NumIn()-1 && t.IsVariadic() { + b = append(b, "..."...) 
+ b = appendTypeName(b, t.In(i).Elem(), qualified, false) + } else { + b = appendTypeName(b, t.In(i), qualified, false) + } + } + b = append(b, ')') + switch t.NumOut() { + case 0: + // Do nothing + case 1: + b = append(b, ' ') + b = appendTypeName(b, t.Out(0), qualified, false) + default: + b = append(b, " ("...) + for i := 0; i < t.NumOut(); i++ { + if i > 0 { + b = append(b, ", "...) + } + b = appendTypeName(b, t.Out(i), qualified, false) + } + b = append(b, ')') + } + case reflect.Struct: + b = append(b, "struct{ "...) + for i := 0; i < t.NumField(); i++ { + if i > 0 { + b = append(b, "; "...) + } + sf := t.Field(i) + if !sf.Anonymous { + if qualified && sf.PkgPath != "" { + b = append(b, '"') + b = append(b, sf.PkgPath...) + b = append(b, '"') + b = append(b, '.') + } + b = append(b, sf.Name...) + b = append(b, ' ') + } + b = appendTypeName(b, sf.Type, qualified, false) + if sf.Tag != "" { + b = append(b, ' ') + b = strconv.AppendQuote(b, string(sf.Tag)) + } + } + if b[len(b)-1] == ' ' { + b = b[:len(b)-1] + } else { + b = append(b, ' ') + } + b = append(b, '}') + case reflect.Slice, reflect.Array: + b = append(b, '[') + if k == reflect.Array { + b = strconv.AppendUint(b, uint64(t.Len()), 10) + } + b = append(b, ']') + b = appendTypeName(b, t.Elem(), qualified, false) + case reflect.Map: + b = append(b, "map["...) + b = appendTypeName(b, t.Key(), qualified, false) + b = append(b, ']') + b = appendTypeName(b, t.Elem(), qualified, false) + case reflect.Ptr: + b = append(b, '*') + b = appendTypeName(b, t.Elem(), qualified, false) + case reflect.Interface: + b = append(b, "interface{ "...) + for i := 0; i < t.NumMethod(); i++ { + if i > 0 { + b = append(b, "; "...) + } + m := t.Method(i) + if qualified && m.PkgPath != "" { + b = append(b, '"') + b = append(b, m.PkgPath...) + b = append(b, '"') + b = append(b, '.') + } + b = append(b, m.Name...) + b = appendTypeName(b, m.Type, qualified, true) + } + if b[len(b)-1] == ' ' { + b = b[:len(b)-1] + } else { + b = append(b, ' ') + } + b = append(b, '}') + default: + panic("invalid kind: " + k.String()) + } + return b +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go index 0a01c4796..e9e384a1c 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go @@ -21,3 +21,13 @@ func PointerOf(v reflect.Value) Pointer { // assumes that the GC implementation does not use a moving collector. return Pointer{v.Pointer(), v.Type()} } + +// IsNil reports whether the pointer is nil. +func (p Pointer) IsNil() bool { + return p.p == 0 +} + +// Uintptr returns the pointer as a uintptr. +func (p Pointer) Uintptr() uintptr { + return p.p +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go index da134ae2a..b50c17ec7 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go @@ -24,3 +24,13 @@ func PointerOf(v reflect.Value) Pointer { // which is necessary if the GC ever uses a moving collector. return Pointer{unsafe.Pointer(v.Pointer()), v.Type()} } + +// IsNil reports whether the pointer is nil. +func (p Pointer) IsNil() bool { + return p.p == nil +} + +// Uintptr returns the pointer as a uintptr. 
+func (p Pointer) Uintptr() uintptr { + return uintptr(p.p) +} diff --git a/vendor/github.com/google/go-cmp/cmp/options.go b/vendor/github.com/google/go-cmp/cmp/options.go index 793448160..4b0407a7f 100644 --- a/vendor/github.com/google/go-cmp/cmp/options.go +++ b/vendor/github.com/google/go-cmp/cmp/options.go @@ -225,8 +225,23 @@ func (validator) apply(s *state, vx, vy reflect.Value) { // Unable to Interface implies unexported field without visibility access. if !vx.CanInterface() || !vy.CanInterface() { - const help = "consider using a custom Comparer; if you control the implementation of type, you can also consider AllowUnexported or cmpopts.IgnoreUnexported" - panic(fmt.Sprintf("cannot handle unexported field: %#v\n%s", s.curPath, help)) + help := "consider using a custom Comparer; if you control the implementation of type, you can also consider using an Exporter, AllowUnexported, or cmpopts.IgnoreUnexported" + var name string + if t := s.curPath.Index(-2).Type(); t.Name() != "" { + // Named type with unexported fields. + name = fmt.Sprintf("%q.%v", t.PkgPath(), t.Name()) // e.g., "path/to/package".MyType + if _, ok := reflect.New(t).Interface().(error); ok { + help = "consider using cmpopts.EquateErrors to compare error values" + } + } else { + // Unnamed type with unexported fields. Derive PkgPath from field. + var pkgPath string + for i := 0; i < t.NumField() && pkgPath == ""; i++ { + pkgPath = t.Field(i).PkgPath + } + name = fmt.Sprintf("%q.(%v)", pkgPath, t.String()) // e.g., "path/to/package".(struct { a int }) + } + panic(fmt.Sprintf("cannot handle unexported field at %#v:\n\t%v\n%s", s.curPath, name, help)) } panic("not reachable") @@ -360,9 +375,8 @@ func (cm comparer) String() string { return fmt.Sprintf("Comparer(%s)", function.NameOf(cm.fnc)) } -// AllowUnexported returns an Option that forcibly allows operations on -// unexported fields in certain structs, which are specified by passing in a -// value of each struct type. +// Exporter returns an Option that specifies whether Equal is allowed to +// introspect into the unexported fields of certain struct types. // // Users of this option must understand that comparing on unexported fields // from external packages is not safe since changes in the internal @@ -386,10 +400,24 @@ func (cm comparer) String() string { // // In other cases, the cmpopts.IgnoreUnexported option can be used to ignore // all unexported fields on specified struct types. -func AllowUnexported(types ...interface{}) Option { - if !supportAllowUnexported { - panic("AllowUnexported is not supported on purego builds, Google App Engine Standard, or GopherJS") +func Exporter(f func(reflect.Type) bool) Option { + if !supportExporters { + panic("Exporter is not supported on purego builds") } + return exporter(f) +} + +type exporter func(reflect.Type) bool + +func (exporter) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { + panic("not implemented") +} + +// AllowUnexported returns an Options that allows Equal to forcibly introspect +// unexported fields of the specified struct types. +// +// See Exporter for the proper use of this option. 
+func AllowUnexported(types ...interface{}) Option { m := make(map[reflect.Type]bool) for _, typ := range types { t := reflect.TypeOf(typ) @@ -398,13 +426,7 @@ func AllowUnexported(types ...interface{}) Option { } m[t] = true } - return visibleStructs(m) -} - -type visibleStructs map[reflect.Type]bool - -func (visibleStructs) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { - panic("not implemented") + return exporter(func(t reflect.Type) bool { return m[t] }) } // Result represents the comparison result for a single node and @@ -436,6 +458,11 @@ func (r Result) ByFunc() bool { return r.flags&reportByFunc != 0 } +// ByCycle reports whether a reference cycle was detected. +func (r Result) ByCycle() bool { + return r.flags&reportByCycle != 0 +} + type resultFlags uint const ( @@ -446,6 +473,7 @@ const ( reportByIgnore reportByMethod reportByFunc + reportByCycle ) // Reporter is an Option that can be passed to Equal. When Equal traverses diff --git a/vendor/github.com/google/go-cmp/cmp/path.go b/vendor/github.com/google/go-cmp/cmp/path.go index 96fffd291..603dbb002 100644 --- a/vendor/github.com/google/go-cmp/cmp/path.go +++ b/vendor/github.com/google/go-cmp/cmp/path.go @@ -10,6 +10,8 @@ import ( "strings" "unicode" "unicode/utf8" + + "github.com/google/go-cmp/cmp/internal/value" ) // Path is a list of PathSteps describing the sequence of operations to get @@ -41,7 +43,7 @@ type PathStep interface { // In some cases, one or both may be invalid or have restrictions: // • For StructField, both are not interface-able if the current field // is unexported and the struct type is not explicitly permitted by - // AllowUnexported to traverse unexported fields. + // an Exporter to traverse unexported fields. // • For SliceIndex, one may be invalid if an element is missing from // either the x or y slice. // • For MapIndex, one may be invalid if an entry is missing from @@ -175,7 +177,8 @@ type structField struct { // pvx, pvy, and field are only valid if unexported is true. unexported bool mayForce bool // Forcibly allow visibility - pvx, pvy reflect.Value // Parent values + paddr bool // Was parent addressable? + pvx, pvy reflect.Value // Parent values (always addressible) field reflect.StructField // Field information } @@ -187,8 +190,8 @@ func (sf StructField) Values() (vx, vy reflect.Value) { // Forcibly obtain read-write access to an unexported struct field. if sf.mayForce { - vx = retrieveUnexportedField(sf.pvx, sf.field) - vy = retrieveUnexportedField(sf.pvy, sf.field) + vx = retrieveUnexportedField(sf.pvx, sf.field, sf.paddr) + vy = retrieveUnexportedField(sf.pvy, sf.field, sf.paddr) return vx, vy // CanInterface reports true } return sf.vx, sf.vy // CanInterface reports false @@ -207,6 +210,7 @@ type SliceIndex struct{ *sliceIndex } type sliceIndex struct { pathStep xkey, ykey int + isSlice bool // False for reflect.Array } func (si SliceIndex) Type() reflect.Type { return si.typ } @@ -301,6 +305,72 @@ func (tf Transform) Func() reflect.Value { return tf.trans.fnc } // The == operator can be used to detect the exact option used. func (tf Transform) Option() Option { return tf.trans } +// pointerPath represents a dual-stack of pointers encountered when +// recursively traversing the x and y values. This data structure supports +// detection of cycles and determining whether the cycles are equal. +// In Go, cycles can occur via pointers, slices, and maps. 
+// +// The pointerPath uses a map to represent a stack; where descension into a +// pointer pushes the address onto the stack, and ascension from a pointer +// pops the address from the stack. Thus, when traversing into a pointer from +// reflect.Ptr, reflect.Slice element, or reflect.Map, we can detect cycles +// by checking whether the pointer has already been visited. The cycle detection +// uses a seperate stack for the x and y values. +// +// If a cycle is detected we need to determine whether the two pointers +// should be considered equal. The definition of equality chosen by Equal +// requires two graphs to have the same structure. To determine this, both the +// x and y values must have a cycle where the previous pointers were also +// encountered together as a pair. +// +// Semantically, this is equivalent to augmenting Indirect, SliceIndex, and +// MapIndex with pointer information for the x and y values. +// Suppose px and py are two pointers to compare, we then search the +// Path for whether px was ever encountered in the Path history of x, and +// similarly so with py. If either side has a cycle, the comparison is only +// equal if both px and py have a cycle resulting from the same PathStep. +// +// Using a map as a stack is more performant as we can perform cycle detection +// in O(1) instead of O(N) where N is len(Path). +type pointerPath struct { + // mx is keyed by x pointers, where the value is the associated y pointer. + mx map[value.Pointer]value.Pointer + // my is keyed by y pointers, where the value is the associated x pointer. + my map[value.Pointer]value.Pointer +} + +func (p *pointerPath) Init() { + p.mx = make(map[value.Pointer]value.Pointer) + p.my = make(map[value.Pointer]value.Pointer) +} + +// Push indicates intent to descend into pointers vx and vy where +// visited reports whether either has been seen before. If visited before, +// equal reports whether both pointers were encountered together. +// Pop must be called if and only if the pointers were never visited. +// +// The pointers vx and vy must be a reflect.Ptr, reflect.Slice, or reflect.Map +// and be non-nil. +func (p pointerPath) Push(vx, vy reflect.Value) (equal, visited bool) { + px := value.PointerOf(vx) + py := value.PointerOf(vy) + _, ok1 := p.mx[px] + _, ok2 := p.my[py] + if ok1 || ok2 { + equal = p.mx[px] == py && p.my[py] == px // Pointers paired together + return equal, true + } + p.mx[px] = py + p.my[py] = px + return false, false +} + +// Pop ascends from pointers vx and vy. +func (p pointerPath) Pop(vx, vy reflect.Value) { + delete(p.mx, value.PointerOf(vx)) + delete(p.my, value.PointerOf(vy)) +} + // isExported reports whether the identifier is exported. 
func isExported(id string) bool { r, _ := utf8.DecodeRuneInString(id) diff --git a/vendor/github.com/google/go-cmp/cmp/report.go b/vendor/github.com/google/go-cmp/cmp/report.go index 6ddf29993..aafcb3635 100644 --- a/vendor/github.com/google/go-cmp/cmp/report.go +++ b/vendor/github.com/google/go-cmp/cmp/report.go @@ -41,7 +41,10 @@ func (r *defaultReporter) String() string { if r.root.NumDiff == 0 { return "" } - return formatOptions{}.FormatDiff(r.root).String() + ptrs := new(pointerReferences) + text := formatOptions{}.FormatDiff(r.root, ptrs) + resolveReferences(text) + return text.String() } func assert(ok bool) { diff --git a/vendor/github.com/google/go-cmp/cmp/report_compare.go b/vendor/github.com/google/go-cmp/cmp/report_compare.go index 17a05eede..9e2180964 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_compare.go +++ b/vendor/github.com/google/go-cmp/cmp/report_compare.go @@ -11,14 +11,6 @@ import ( "github.com/google/go-cmp/cmp/internal/value" ) -// TODO: Enforce limits? -// * Enforce maximum number of records to print per node? -// * Enforce maximum size in bytes allowed? -// * As a heuristic, use less verbosity for equal nodes than unequal nodes. -// TODO: Enforce unique outputs? -// * Avoid Stringer methods if it results in same output? -// * Print pointer address if outputs still equal? - // numContextRecords is the number of surrounding equal records to print. const numContextRecords = 2 @@ -71,24 +63,66 @@ func (opts formatOptions) WithTypeMode(t typeMode) formatOptions { opts.TypeMode = t return opts } +func (opts formatOptions) WithVerbosity(level int) formatOptions { + opts.VerbosityLevel = level + opts.LimitVerbosity = true + return opts +} +func (opts formatOptions) verbosity() uint { + switch { + case opts.VerbosityLevel < 0: + return 0 + case opts.VerbosityLevel > 16: + return 16 // some reasonable maximum to avoid shift overflow + default: + return uint(opts.VerbosityLevel) + } +} + +const maxVerbosityPreset = 3 + +// verbosityPreset modifies the verbosity settings given an index +// between 0 and maxVerbosityPreset, inclusive. +func verbosityPreset(opts formatOptions, i int) formatOptions { + opts.VerbosityLevel = int(opts.verbosity()) + 2*i + if i > 0 { + opts.AvoidStringer = true + } + if i >= maxVerbosityPreset { + opts.PrintAddresses = true + opts.QualifiedNames = true + } + return opts +} // FormatDiff converts a valueNode tree into a textNode tree, where the later // is a textual representation of the differences detected in the former. -func (opts formatOptions) FormatDiff(v *valueNode) textNode { +func (opts formatOptions) FormatDiff(v *valueNode, ptrs *pointerReferences) (out textNode) { + if opts.DiffMode == diffIdentical { + opts = opts.WithVerbosity(1) + } else { + opts = opts.WithVerbosity(3) + } + // Check whether we have specialized formatting for this node. // This is not necessary, but helpful for producing more readable outputs. if opts.CanFormatDiffSlice(v) { return opts.FormatDiffSlice(v) } + var parentKind reflect.Kind + if v.parent != nil && v.parent.TransformerName == "" { + parentKind = v.parent.Type.Kind() + } + // For leaf nodes, format the value based on the reflect.Values alone. if v.MaxDepth == 0 { switch opts.DiffMode { case diffUnknown, diffIdentical: // Format Equal. 
if v.NumDiff == 0 { - outx := opts.FormatValue(v.ValueX, visitedPointers{}) - outy := opts.FormatValue(v.ValueY, visitedPointers{}) + outx := opts.FormatValue(v.ValueX, parentKind, ptrs) + outy := opts.FormatValue(v.ValueY, parentKind, ptrs) if v.NumIgnored > 0 && v.NumSame == 0 { return textEllipsis } else if outx.Len() < outy.Len() { @@ -101,8 +135,13 @@ func (opts formatOptions) FormatDiff(v *valueNode) textNode { // Format unequal. assert(opts.DiffMode == diffUnknown) var list textList - outx := opts.WithTypeMode(elideType).FormatValue(v.ValueX, visitedPointers{}) - outy := opts.WithTypeMode(elideType).FormatValue(v.ValueY, visitedPointers{}) + outx := opts.WithTypeMode(elideType).FormatValue(v.ValueX, parentKind, ptrs) + outy := opts.WithTypeMode(elideType).FormatValue(v.ValueY, parentKind, ptrs) + for i := 0; i <= maxVerbosityPreset && outx != nil && outy != nil && outx.Equal(outy); i++ { + opts2 := verbosityPreset(opts, i).WithTypeMode(elideType) + outx = opts2.FormatValue(v.ValueX, parentKind, ptrs) + outy = opts2.FormatValue(v.ValueY, parentKind, ptrs) + } if outx != nil { list = append(list, textRecord{Diff: '-', Value: outx}) } @@ -111,34 +150,57 @@ func (opts formatOptions) FormatDiff(v *valueNode) textNode { } return opts.WithTypeMode(emitType).FormatType(v.Type, list) case diffRemoved: - return opts.FormatValue(v.ValueX, visitedPointers{}) + return opts.FormatValue(v.ValueX, parentKind, ptrs) case diffInserted: - return opts.FormatValue(v.ValueY, visitedPointers{}) + return opts.FormatValue(v.ValueY, parentKind, ptrs) default: panic("invalid diff mode") } } + // Register slice element to support cycle detection. + if parentKind == reflect.Slice { + ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, true) + defer ptrs.Pop() + defer func() { out = wrapTrunkReferences(ptrRefs, out) }() + } + // Descend into the child value node. if v.TransformerName != "" { - out := opts.WithTypeMode(emitType).FormatDiff(v.Value) - out = textWrap{"Inverse(" + v.TransformerName + ", ", out, ")"} + out := opts.WithTypeMode(emitType).FormatDiff(v.Value, ptrs) + out = &textWrap{Prefix: "Inverse(" + v.TransformerName + ", ", Value: out, Suffix: ")"} return opts.FormatType(v.Type, out) } else { switch k := v.Type.Kind(); k { - case reflect.Struct, reflect.Array, reflect.Slice, reflect.Map: - return opts.FormatType(v.Type, opts.formatDiffList(v.Records, k)) + case reflect.Struct, reflect.Array, reflect.Slice: + out = opts.formatDiffList(v.Records, k, ptrs) + out = opts.FormatType(v.Type, out) + case reflect.Map: + // Register map to support cycle detection. + ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, false) + defer ptrs.Pop() + + out = opts.formatDiffList(v.Records, k, ptrs) + out = wrapTrunkReferences(ptrRefs, out) + out = opts.FormatType(v.Type, out) case reflect.Ptr: - return textWrap{"&", opts.FormatDiff(v.Value), ""} + // Register pointer to support cycle detection. 
+ ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, false) + defer ptrs.Pop() + + out = opts.FormatDiff(v.Value, ptrs) + out = wrapTrunkReferences(ptrRefs, out) + out = &textWrap{Prefix: "&", Value: out} case reflect.Interface: - return opts.WithTypeMode(emitType).FormatDiff(v.Value) + out = opts.WithTypeMode(emitType).FormatDiff(v.Value, ptrs) default: panic(fmt.Sprintf("%v cannot have children", k)) } + return out } } -func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind) textNode { +func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind, ptrs *pointerReferences) textNode { // Derive record name based on the data structure kind. var name string var formatKey func(reflect.Value) string @@ -154,7 +216,17 @@ func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind) te case reflect.Map: name = "entry" opts = opts.WithTypeMode(elideType) - formatKey = formatMapKey + formatKey = func(v reflect.Value) string { return formatMapKey(v, false, ptrs) } + } + + maxLen := -1 + if opts.LimitVerbosity { + if opts.DiffMode == diffIdentical { + maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc... + } else { + maxLen = (1 << opts.verbosity()) << 1 // 2, 4, 8, 16, 32, 64, etc... + } + opts.VerbosityLevel-- } // Handle unification. @@ -163,6 +235,11 @@ func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind) te var list textList var deferredEllipsis bool // Add final "..." to indicate records were dropped for _, r := range recs { + if len(list) == maxLen { + deferredEllipsis = true + break + } + // Elide struct fields that are zero value. if k == reflect.Struct { var isZero bool @@ -186,23 +263,31 @@ func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind) te } continue } - if out := opts.FormatDiff(r.Value); out != nil { + if out := opts.FormatDiff(r.Value, ptrs); out != nil { list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) } } if deferredEllipsis { list.AppendEllipsis(diffStats{}) } - return textWrap{"{", list, "}"} + return &textWrap{Prefix: "{", Value: list, Suffix: "}"} case diffUnknown: default: panic("invalid diff mode") } // Handle differencing. + var numDiffs int var list textList + var keys []reflect.Value // invariant: len(list) == len(keys) groups := coalesceAdjacentRecords(name, recs) + maxGroup := diffStats{Name: name} for i, ds := range groups { + if maxLen >= 0 && numDiffs >= maxLen { + maxGroup = maxGroup.Append(ds) + continue + } + // Handle equal records. if ds.NumDiff() == 0 { // Compute the number of leading and trailing records to print. @@ -226,16 +311,21 @@ func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind) te // Format the equal values. 
for _, r := range recs[:numLo] { - out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value) + out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value, ptrs) list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + keys = append(keys, r.Key) } if numEqual > numLo+numHi { ds.NumIdentical -= numLo + numHi list.AppendEllipsis(ds) + for len(keys) < len(list) { + keys = append(keys, reflect.Value{}) + } } for _, r := range recs[numEqual-numHi : numEqual] { - out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value) + out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value, ptrs) list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + keys = append(keys, r.Key) } recs = recs[numEqual:] continue @@ -247,24 +337,70 @@ func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind) te case opts.CanFormatDiffSlice(r.Value): out := opts.FormatDiffSlice(r.Value) list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + keys = append(keys, r.Key) case r.Value.NumChildren == r.Value.MaxDepth: - outx := opts.WithDiffMode(diffRemoved).FormatDiff(r.Value) - outy := opts.WithDiffMode(diffInserted).FormatDiff(r.Value) + outx := opts.WithDiffMode(diffRemoved).FormatDiff(r.Value, ptrs) + outy := opts.WithDiffMode(diffInserted).FormatDiff(r.Value, ptrs) + for i := 0; i <= maxVerbosityPreset && outx != nil && outy != nil && outx.Equal(outy); i++ { + opts2 := verbosityPreset(opts, i) + outx = opts2.WithDiffMode(diffRemoved).FormatDiff(r.Value, ptrs) + outy = opts2.WithDiffMode(diffInserted).FormatDiff(r.Value, ptrs) + } if outx != nil { list = append(list, textRecord{Diff: diffRemoved, Key: formatKey(r.Key), Value: outx}) + keys = append(keys, r.Key) } if outy != nil { list = append(list, textRecord{Diff: diffInserted, Key: formatKey(r.Key), Value: outy}) + keys = append(keys, r.Key) } default: - out := opts.FormatDiff(r.Value) + out := opts.FormatDiff(r.Value, ptrs) list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + keys = append(keys, r.Key) } } recs = recs[ds.NumDiff():] + numDiffs += ds.NumDiff() + } + if maxGroup.IsZero() { + assert(len(recs) == 0) + } else { + list.AppendEllipsis(maxGroup) + for len(keys) < len(list) { + keys = append(keys, reflect.Value{}) + } } - assert(len(recs) == 0) - return textWrap{"{", list, "}"} + assert(len(list) == len(keys)) + + // For maps, the default formatting logic uses fmt.Stringer which may + // produce ambiguous output. Avoid calling String to disambiguate. + if k == reflect.Map { + var ambiguous bool + seenKeys := map[string]reflect.Value{} + for i, currKey := range keys { + if currKey.IsValid() { + strKey := list[i].Key + prevKey, seen := seenKeys[strKey] + if seen && prevKey.CanInterface() && currKey.CanInterface() { + ambiguous = prevKey.Interface() != currKey.Interface() + if ambiguous { + break + } + } + seenKeys[strKey] = currKey + } + } + if ambiguous { + for i, k := range keys { + if k.IsValid() { + list[i].Key = formatMapKey(k, true, ptrs) + } + } + } + } + + return &textWrap{Prefix: "{", Value: list, Suffix: "}"} } // coalesceAdjacentRecords coalesces the list of records into groups of diff --git a/vendor/github.com/google/go-cmp/cmp/report_references.go b/vendor/github.com/google/go-cmp/cmp/report_references.go new file mode 100644 index 000000000..d620c2c20 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report_references.go @@ -0,0 +1,264 @@ +// Copyright 2020, The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +import ( + "fmt" + "reflect" + "strings" + + "github.com/google/go-cmp/cmp/internal/flags" + "github.com/google/go-cmp/cmp/internal/value" +) + +const ( + pointerDelimPrefix = "⟪" + pointerDelimSuffix = "⟫" +) + +// formatPointer prints the address of the pointer. +func formatPointer(p value.Pointer, withDelims bool) string { + v := p.Uintptr() + if flags.Deterministic { + v = 0xdeadf00f // Only used for stable testing purposes + } + if withDelims { + return pointerDelimPrefix + formatHex(uint64(v)) + pointerDelimSuffix + } + return formatHex(uint64(v)) +} + +// pointerReferences is a stack of pointers visited so far. +type pointerReferences [][2]value.Pointer + +func (ps *pointerReferences) PushPair(vx, vy reflect.Value, d diffMode, deref bool) (pp [2]value.Pointer) { + if deref && vx.IsValid() { + vx = vx.Addr() + } + if deref && vy.IsValid() { + vy = vy.Addr() + } + switch d { + case diffUnknown, diffIdentical: + pp = [2]value.Pointer{value.PointerOf(vx), value.PointerOf(vy)} + case diffRemoved: + pp = [2]value.Pointer{value.PointerOf(vx), value.Pointer{}} + case diffInserted: + pp = [2]value.Pointer{value.Pointer{}, value.PointerOf(vy)} + } + *ps = append(*ps, pp) + return pp +} + +func (ps *pointerReferences) Push(v reflect.Value) (p value.Pointer, seen bool) { + p = value.PointerOf(v) + for _, pp := range *ps { + if p == pp[0] || p == pp[1] { + return p, true + } + } + *ps = append(*ps, [2]value.Pointer{p, p}) + return p, false +} + +func (ps *pointerReferences) Pop() { + *ps = (*ps)[:len(*ps)-1] +} + +// trunkReferences is metadata for a textNode indicating that the sub-tree +// represents the value for either pointer in a pair of references. +type trunkReferences struct{ pp [2]value.Pointer } + +// trunkReference is metadata for a textNode indicating that the sub-tree +// represents the value for the given pointer reference. +type trunkReference struct{ p value.Pointer } + +// leafReference is metadata for a textNode indicating that the value is +// truncated as it refers to another part of the tree (i.e., a trunk). +type leafReference struct{ p value.Pointer } + +func wrapTrunkReferences(pp [2]value.Pointer, s textNode) textNode { + switch { + case pp[0].IsNil(): + return &textWrap{Value: s, Metadata: trunkReference{pp[1]}} + case pp[1].IsNil(): + return &textWrap{Value: s, Metadata: trunkReference{pp[0]}} + case pp[0] == pp[1]: + return &textWrap{Value: s, Metadata: trunkReference{pp[0]}} + default: + return &textWrap{Value: s, Metadata: trunkReferences{pp}} + } +} +func wrapTrunkReference(p value.Pointer, printAddress bool, s textNode) textNode { + var prefix string + if printAddress { + prefix = formatPointer(p, true) + } + return &textWrap{Prefix: prefix, Value: s, Metadata: trunkReference{p}} +} +func makeLeafReference(p value.Pointer, printAddress bool) textNode { + out := &textWrap{Prefix: "(", Value: textEllipsis, Suffix: ")"} + var prefix string + if printAddress { + prefix = formatPointer(p, true) + } + return &textWrap{Prefix: prefix, Value: out, Metadata: leafReference{p}} +} + +// resolveReferences walks the textNode tree searching for any leaf reference +// metadata and resolves each against the corresponding trunk references. +// Since pointer addresses in memory are not particularly readable to the user, +// it replaces each pointer value with an arbitrary and unique reference ID. 
+func resolveReferences(s textNode) { + var walkNodes func(textNode, func(textNode)) + walkNodes = func(s textNode, f func(textNode)) { + f(s) + switch s := s.(type) { + case *textWrap: + walkNodes(s.Value, f) + case textList: + for _, r := range s { + walkNodes(r.Value, f) + } + } + } + + // Collect all trunks and leaves with reference metadata. + var trunks, leaves []*textWrap + walkNodes(s, func(s textNode) { + if s, ok := s.(*textWrap); ok { + switch s.Metadata.(type) { + case leafReference: + leaves = append(leaves, s) + case trunkReference, trunkReferences: + trunks = append(trunks, s) + } + } + }) + + // No leaf references to resolve. + if len(leaves) == 0 { + return + } + + // Collect the set of all leaf references to resolve. + leafPtrs := make(map[value.Pointer]bool) + for _, leaf := range leaves { + leafPtrs[leaf.Metadata.(leafReference).p] = true + } + + // Collect the set of trunk pointers that are always paired together. + // This allows us to assign a single ID to both pointers for brevity. + // If a pointer in a pair ever occurs by itself or as a different pair, + // then the pair is broken. + pairedTrunkPtrs := make(map[value.Pointer]value.Pointer) + unpair := func(p value.Pointer) { + if !pairedTrunkPtrs[p].IsNil() { + pairedTrunkPtrs[pairedTrunkPtrs[p]] = value.Pointer{} // invalidate other half + } + pairedTrunkPtrs[p] = value.Pointer{} // invalidate this half + } + for _, trunk := range trunks { + switch p := trunk.Metadata.(type) { + case trunkReference: + unpair(p.p) // standalone pointer cannot be part of a pair + case trunkReferences: + p0, ok0 := pairedTrunkPtrs[p.pp[0]] + p1, ok1 := pairedTrunkPtrs[p.pp[1]] + switch { + case !ok0 && !ok1: + // Register the newly seen pair. + pairedTrunkPtrs[p.pp[0]] = p.pp[1] + pairedTrunkPtrs[p.pp[1]] = p.pp[0] + case ok0 && ok1 && p0 == p.pp[1] && p1 == p.pp[0]: + // Exact pair already seen; do nothing. + default: + // Pair conflicts with some other pair; break all pairs. + unpair(p.pp[0]) + unpair(p.pp[1]) + } + } + } + + // Correlate each pointer referenced by leaves to a unique identifier, + // and print the IDs for each trunk that matches those pointers. 
+ var nextID uint + ptrIDs := make(map[value.Pointer]uint) + newID := func() uint { + id := nextID + nextID++ + return id + } + for _, trunk := range trunks { + switch p := trunk.Metadata.(type) { + case trunkReference: + if print := leafPtrs[p.p]; print { + id, ok := ptrIDs[p.p] + if !ok { + id = newID() + ptrIDs[p.p] = id + } + trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id)) + } + case trunkReferences: + print0 := leafPtrs[p.pp[0]] + print1 := leafPtrs[p.pp[1]] + if print0 || print1 { + id0, ok0 := ptrIDs[p.pp[0]] + id1, ok1 := ptrIDs[p.pp[1]] + isPair := pairedTrunkPtrs[p.pp[0]] == p.pp[1] && pairedTrunkPtrs[p.pp[1]] == p.pp[0] + if isPair { + var id uint + assert(ok0 == ok1) // must be seen together or not at all + if ok0 { + assert(id0 == id1) // must have the same ID + id = id0 + } else { + id = newID() + ptrIDs[p.pp[0]] = id + ptrIDs[p.pp[1]] = id + } + trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id)) + } else { + if print0 && !ok0 { + id0 = newID() + ptrIDs[p.pp[0]] = id0 + } + if print1 && !ok1 { + id1 = newID() + ptrIDs[p.pp[1]] = id1 + } + switch { + case print0 && print1: + trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id0)+","+formatReference(id1)) + case print0: + trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id0)) + case print1: + trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id1)) + } + } + } + } + } + + // Update all leaf references with the unique identifier. + for _, leaf := range leaves { + if id, ok := ptrIDs[leaf.Metadata.(leafReference).p]; ok { + leaf.Prefix = updateReferencePrefix(leaf.Prefix, formatReference(id)) + } + } +} + +func formatReference(id uint) string { + return fmt.Sprintf("ref#%d", id) +} + +func updateReferencePrefix(prefix, ref string) string { + if prefix == "" { + return pointerDelimPrefix + ref + pointerDelimSuffix + } + suffix := strings.TrimPrefix(prefix, pointerDelimPrefix) + return pointerDelimPrefix + ref + ": " + suffix +} diff --git a/vendor/github.com/google/go-cmp/cmp/report_reflect.go b/vendor/github.com/google/go-cmp/cmp/report_reflect.go index 2761b6289..786f67126 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_reflect.go +++ b/vendor/github.com/google/go-cmp/cmp/report_reflect.go @@ -5,13 +5,14 @@ package cmp import ( + "bytes" "fmt" "reflect" "strconv" "strings" "unicode" + "unicode/utf8" - "github.com/google/go-cmp/cmp/internal/flags" "github.com/google/go-cmp/cmp/internal/value" ) @@ -20,14 +21,22 @@ type formatValueOptions struct { // methods like error.Error or fmt.Stringer.String. AvoidStringer bool - // ShallowPointers controls whether to avoid descending into pointers. - // Useful when printing map keys, where pointer comparison is performed - // on the pointer address rather than the pointed-at value. - ShallowPointers bool - // PrintAddresses controls whether to print the address of all pointers, // slice elements, and maps. PrintAddresses bool + + // QualifiedNames controls whether FormatType uses the fully qualified name + // (including the full package path as opposed to just the package name). + QualifiedNames bool + + // VerbosityLevel controls the amount of output to produce. + // A higher value produces more output. A value of zero or lower produces + // no output (represented using an ellipsis). + // If LimitVerbosity is false, then the level is treated as infinite. + VerbosityLevel int + + // LimitVerbosity specifies that formatting should respect VerbosityLevel. 
+ LimitVerbosity bool } // FormatType prints the type as if it were wrapping s. @@ -44,12 +53,15 @@ func (opts formatOptions) FormatType(t reflect.Type, s textNode) textNode { default: return s } + if opts.DiffMode == diffIdentical { + return s // elide type for identical nodes + } case elideType: return s } // Determine the type label, applying special handling for unnamed types. - typeName := t.String() + typeName := value.TypeString(t, opts.QualifiedNames) if t.Name() == "" { // According to Go grammar, certain type literals contain symbols that // do not strongly bind to the next lexicographical token (e.g., *T). @@ -57,39 +69,77 @@ func (opts formatOptions) FormatType(t reflect.Type, s textNode) textNode { case reflect.Chan, reflect.Func, reflect.Ptr: typeName = "(" + typeName + ")" } - typeName = strings.Replace(typeName, "struct {", "struct{", -1) - typeName = strings.Replace(typeName, "interface {", "interface{", -1) } + return &textWrap{Prefix: typeName, Value: wrapParens(s)} +} + +// wrapParens wraps s with a set of parenthesis, but avoids it if the +// wrapped node itself is already surrounded by a pair of parenthesis or braces. +// It handles unwrapping one level of pointer-reference nodes. +func wrapParens(s textNode) textNode { + var refNode *textWrap + if s2, ok := s.(*textWrap); ok { + // Unwrap a single pointer reference node. + switch s2.Metadata.(type) { + case leafReference, trunkReference, trunkReferences: + refNode = s2 + if s3, ok := refNode.Value.(*textWrap); ok { + s2 = s3 + } + } - // Avoid wrap the value in parenthesis if unnecessary. - if s, ok := s.(textWrap); ok { - hasParens := strings.HasPrefix(s.Prefix, "(") && strings.HasSuffix(s.Suffix, ")") - hasBraces := strings.HasPrefix(s.Prefix, "{") && strings.HasSuffix(s.Suffix, "}") + // Already has delimiters that make parenthesis unnecessary. + hasParens := strings.HasPrefix(s2.Prefix, "(") && strings.HasSuffix(s2.Suffix, ")") + hasBraces := strings.HasPrefix(s2.Prefix, "{") && strings.HasSuffix(s2.Suffix, "}") if hasParens || hasBraces { - return textWrap{typeName, s, ""} + return s } } - return textWrap{typeName + "(", s, ")"} + if refNode != nil { + refNode.Value = &textWrap{Prefix: "(", Value: refNode.Value, Suffix: ")"} + return s + } + return &textWrap{Prefix: "(", Value: s, Suffix: ")"} } // FormatValue prints the reflect.Value, taking extra care to avoid descending -// into pointers already in m. As pointers are visited, m is also updated. -func (opts formatOptions) FormatValue(v reflect.Value, m visitedPointers) (out textNode) { +// into pointers already in ptrs. As pointers are visited, ptrs is also updated. +func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind, ptrs *pointerReferences) (out textNode) { if !v.IsValid() { return nil } t := v.Type() + // Check slice element for cycles. + if parentKind == reflect.Slice { + ptrRef, visited := ptrs.Push(v.Addr()) + if visited { + return makeLeafReference(ptrRef, false) + } + defer ptrs.Pop() + defer func() { out = wrapTrunkReference(ptrRef, false, out) }() + } + // Check whether there is an Error or String method to call. if !opts.AvoidStringer && v.CanInterface() { // Avoid calling Error or String methods on nil receivers since many // implementations crash when doing so. 
if (t.Kind() != reflect.Ptr && t.Kind() != reflect.Interface) || !v.IsNil() { - switch v := v.Interface().(type) { - case error: - return textLine("e" + formatString(v.Error())) - case fmt.Stringer: - return textLine("s" + formatString(v.String())) + var prefix, strVal string + func() { + // Swallow and ignore any panics from String or Error. + defer func() { recover() }() + switch v := v.Interface().(type) { + case error: + strVal = v.Error() + prefix = "e" + case fmt.Stringer: + strVal = v.String() + prefix = "s" + } + }() + if prefix != "" { + return opts.formatString(prefix, strVal) } } } @@ -102,94 +152,140 @@ func (opts formatOptions) FormatValue(v reflect.Value, m visitedPointers) (out t } }() - var ptr string switch t.Kind() { case reflect.Bool: return textLine(fmt.Sprint(v.Bool())) case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: return textLine(fmt.Sprint(v.Int())) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - // Unnamed uints are usually bytes or words, so use hexadecimal. - if t.PkgPath() == "" || t.Kind() == reflect.Uintptr { + case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return textLine(fmt.Sprint(v.Uint())) + case reflect.Uint8: + if parentKind == reflect.Slice || parentKind == reflect.Array { return textLine(formatHex(v.Uint())) } return textLine(fmt.Sprint(v.Uint())) + case reflect.Uintptr: + return textLine(formatHex(v.Uint())) case reflect.Float32, reflect.Float64: return textLine(fmt.Sprint(v.Float())) case reflect.Complex64, reflect.Complex128: return textLine(fmt.Sprint(v.Complex())) case reflect.String: - return textLine(formatString(v.String())) + return opts.formatString("", v.String()) case reflect.UnsafePointer, reflect.Chan, reflect.Func: - return textLine(formatPointer(v)) + return textLine(formatPointer(value.PointerOf(v), true)) case reflect.Struct: var list textList + v := makeAddressable(v) // needed for retrieveUnexportedField + maxLen := v.NumField() + if opts.LimitVerbosity { + maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc... + opts.VerbosityLevel-- + } for i := 0; i < v.NumField(); i++ { vv := v.Field(i) if value.IsZero(vv) { continue // Elide fields with zero values } - s := opts.WithTypeMode(autoType).FormatValue(vv, m) - list = append(list, textRecord{Key: t.Field(i).Name, Value: s}) + if len(list) == maxLen { + list.AppendEllipsis(diffStats{}) + break + } + sf := t.Field(i) + if supportExporters && !isExported(sf.Name) { + vv = retrieveUnexportedField(v, sf, true) + } + s := opts.WithTypeMode(autoType).FormatValue(vv, t.Kind(), ptrs) + list = append(list, textRecord{Key: sf.Name, Value: s}) } - return textWrap{"{", list, "}"} + return &textWrap{Prefix: "{", Value: list, Suffix: "}"} case reflect.Slice: if v.IsNil() { return textNil } - if opts.PrintAddresses { - ptr = formatPointer(v) + + // Check whether this is a []byte of text data. + if t.Elem() == reflect.TypeOf(byte(0)) { + b := v.Bytes() + isPrintSpace := func(r rune) bool { return unicode.IsPrint(r) && unicode.IsSpace(r) } + if len(b) > 0 && utf8.Valid(b) && len(bytes.TrimFunc(b, isPrintSpace)) == 0 { + out = opts.formatString("", string(b)) + return opts.WithTypeMode(emitType).FormatType(t, out) + } } + fallthrough case reflect.Array: + maxLen := v.Len() + if opts.LimitVerbosity { + maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc... 
+ opts.VerbosityLevel-- + } var list textList for i := 0; i < v.Len(); i++ { - vi := v.Index(i) - if vi.CanAddr() { // Check for cyclic elements - p := vi.Addr() - if m.Visit(p) { - var out textNode - out = textLine(formatPointer(p)) - out = opts.WithTypeMode(emitType).FormatType(p.Type(), out) - out = textWrap{"*", out, ""} - list = append(list, textRecord{Value: out}) - continue - } + if len(list) == maxLen { + list.AppendEllipsis(diffStats{}) + break } - s := opts.WithTypeMode(elideType).FormatValue(vi, m) + s := opts.WithTypeMode(elideType).FormatValue(v.Index(i), t.Kind(), ptrs) list = append(list, textRecord{Value: s}) } - return textWrap{ptr + "{", list, "}"} + + out = &textWrap{Prefix: "{", Value: list, Suffix: "}"} + if t.Kind() == reflect.Slice && opts.PrintAddresses { + header := fmt.Sprintf("ptr:%v, len:%d, cap:%d", formatPointer(value.PointerOf(v), false), v.Len(), v.Cap()) + out = &textWrap{Prefix: pointerDelimPrefix + header + pointerDelimSuffix, Value: out} + } + return out case reflect.Map: if v.IsNil() { return textNil } - if m.Visit(v) { - return textLine(formatPointer(v)) + + // Check pointer for cycles. + ptrRef, visited := ptrs.Push(v) + if visited { + return makeLeafReference(ptrRef, opts.PrintAddresses) } + defer ptrs.Pop() + maxLen := v.Len() + if opts.LimitVerbosity { + maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc... + opts.VerbosityLevel-- + } var list textList for _, k := range value.SortKeys(v.MapKeys()) { - sk := formatMapKey(k) - sv := opts.WithTypeMode(elideType).FormatValue(v.MapIndex(k), m) + if len(list) == maxLen { + list.AppendEllipsis(diffStats{}) + break + } + sk := formatMapKey(k, false, ptrs) + sv := opts.WithTypeMode(elideType).FormatValue(v.MapIndex(k), t.Kind(), ptrs) list = append(list, textRecord{Key: sk, Value: sv}) } - if opts.PrintAddresses { - ptr = formatPointer(v) - } - return textWrap{ptr + "{", list, "}"} + + out = &textWrap{Prefix: "{", Value: list, Suffix: "}"} + out = wrapTrunkReference(ptrRef, opts.PrintAddresses, out) + return out case reflect.Ptr: if v.IsNil() { return textNil } - if m.Visit(v) || opts.ShallowPointers { - return textLine(formatPointer(v)) - } - if opts.PrintAddresses { - ptr = formatPointer(v) + + // Check pointer for cycles. + ptrRef, visited := ptrs.Push(v) + if visited { + out = makeLeafReference(ptrRef, opts.PrintAddresses) + return &textWrap{Prefix: "&", Value: out} } + defer ptrs.Pop() + skipType = true // Let the underlying value print the type instead - return textWrap{"&" + ptr, opts.FormatValue(v.Elem(), m), ""} + out = opts.FormatValue(v.Elem(), t.Kind(), ptrs) + out = wrapTrunkReference(ptrRef, opts.PrintAddresses, out) + out = &textWrap{Prefix: "&", Value: out} + return out case reflect.Interface: if v.IsNil() { return textNil @@ -197,19 +293,65 @@ func (opts formatOptions) FormatValue(v reflect.Value, m visitedPointers) (out t // Interfaces accept different concrete types, // so configure the underlying value to explicitly print the type. skipType = true // Print the concrete type instead - return opts.WithTypeMode(emitType).FormatValue(v.Elem(), m) + return opts.WithTypeMode(emitType).FormatValue(v.Elem(), t.Kind(), ptrs) default: panic(fmt.Sprintf("%v kind not handled", v.Kind())) } } +func (opts formatOptions) formatString(prefix, s string) textNode { + maxLen := len(s) + maxLines := strings.Count(s, "\n") + 1 + if opts.LimitVerbosity { + maxLen = (1 << opts.verbosity()) << 5 // 32, 64, 128, 256, etc... + maxLines = (1 << opts.verbosity()) << 2 // 4, 8, 16, 32, 64, etc... 
+ } + + // For multiline strings, use the triple-quote syntax, + // but only use it when printing removed or inserted nodes since + // we only want the extra verbosity for those cases. + lines := strings.Split(strings.TrimSuffix(s, "\n"), "\n") + isTripleQuoted := len(lines) >= 4 && (opts.DiffMode == '-' || opts.DiffMode == '+') + for i := 0; i < len(lines) && isTripleQuoted; i++ { + lines[i] = strings.TrimPrefix(strings.TrimSuffix(lines[i], "\r"), "\r") // trim leading/trailing carriage returns for legacy Windows endline support + isPrintable := func(r rune) bool { + return unicode.IsPrint(r) || r == '\t' // specially treat tab as printable + } + line := lines[i] + isTripleQuoted = !strings.HasPrefix(strings.TrimPrefix(line, prefix), `"""`) && !strings.HasPrefix(line, "...") && strings.TrimFunc(line, isPrintable) == "" && len(line) <= maxLen + } + if isTripleQuoted { + var list textList + list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(prefix + `"""`), ElideComma: true}) + for i, line := range lines { + if numElided := len(lines) - i; i == maxLines-1 && numElided > 1 { + comment := commentString(fmt.Sprintf("%d elided lines", numElided)) + list = append(list, textRecord{Diff: opts.DiffMode, Value: textEllipsis, ElideComma: true, Comment: comment}) + break + } + list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(line), ElideComma: true}) + } + list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(prefix + `"""`), ElideComma: true}) + return &textWrap{Prefix: "(", Value: list, Suffix: ")"} + } + + // Format the string as a single-line quoted string. + if len(s) > maxLen+len(textEllipsis) { + return textLine(prefix + formatString(s[:maxLen]) + string(textEllipsis)) + } + return textLine(prefix + formatString(s)) +} + // formatMapKey formats v as if it were a map key. // The result is guaranteed to be a single line. -func formatMapKey(v reflect.Value) string { +func formatMapKey(v reflect.Value, disambiguate bool, ptrs *pointerReferences) string { var opts formatOptions + opts.DiffMode = diffIdentical opts.TypeMode = elideType - opts.ShallowPointers = true - s := opts.FormatValue(v, visitedPointers{}).String() + opts.PrintAddresses = disambiguate + opts.AvoidStringer = disambiguate + opts.QualifiedNames = disambiguate + s := opts.FormatValue(v, reflect.Map, ptrs).String() return strings.TrimSpace(s) } @@ -227,7 +369,7 @@ func formatString(s string) string { rawInvalid := func(r rune) bool { return r == '`' || r == '\n' || !(unicode.IsPrint(r) || r == '\t') } - if strings.IndexFunc(s, rawInvalid) < 0 { + if utf8.ValidString(s) && strings.IndexFunc(s, rawInvalid) < 0 { return "`" + s + "`" } return qs @@ -256,23 +398,3 @@ func formatHex(u uint64) string { } return fmt.Sprintf(f, u) } - -// formatPointer prints the address of the pointer. -func formatPointer(v reflect.Value) string { - p := v.Pointer() - if flags.Deterministic { - p = 0xdeadf00f // Only used for stable testing purposes - } - return fmt.Sprintf("⟪0x%x⟫", p) -} - -type visitedPointers map[value.Pointer]struct{} - -// Visit inserts pointer v into the visited map and reports whether it had -// already been visited before. 
-func (m visitedPointers) Visit(v reflect.Value) bool { - p := value.PointerOf(v) - _, visited := m[p] - m[p] = struct{}{} - return visited -} diff --git a/vendor/github.com/google/go-cmp/cmp/report_slices.go b/vendor/github.com/google/go-cmp/cmp/report_slices.go index eafcf2e4c..35315dad3 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_slices.go +++ b/vendor/github.com/google/go-cmp/cmp/report_slices.go @@ -8,6 +8,7 @@ import ( "bytes" "fmt" "reflect" + "strconv" "strings" "unicode" "unicode/utf8" @@ -23,11 +24,25 @@ func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool { return false // Must be formatting in diff mode case v.NumDiff == 0: return false // No differences detected - case v.NumIgnored+v.NumCompared+v.NumTransformed > 0: - // TODO: Handle the case where someone uses bytes.Equal on a large slice. - return false // Some custom option was used to determined equality case !v.ValueX.IsValid() || !v.ValueY.IsValid(): return false // Both values must be valid + case v.Type.Kind() == reflect.Slice && (v.ValueX.Len() == 0 || v.ValueY.Len() == 0): + return false // Both slice values have to be non-empty + case v.NumIgnored > 0: + return false // Some ignore option was used + case v.NumTransformed > 0: + return false // Some transform option was used + case v.NumCompared > 1: + return false // More than one comparison was used + case v.NumCompared == 1 && v.Type.Name() != "": + // The need for cmp to check applicability of options on every element + // in a slice is a significant performance detriment for large []byte. + // The workaround is to specify Comparer(bytes.Equal), + // which enables cmp to compare []byte more efficiently. + // If they differ, we still want to provide batched diffing. + // The logic disallows named types since they tend to have their own + // String method, with nicer formatting than what this provides. + return false } switch t := v.Type; t.Kind() { @@ -82,7 +97,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { } if isText || isBinary { var numLines, lastLineIdx, maxLineLen int - isBinary = false + isBinary = !utf8.ValidString(sx) || !utf8.ValidString(sy) for i, r := range sx + sy { if !(unicode.IsPrint(r) || unicode.IsSpace(r)) || r == utf8.RuneError { isBinary = true @@ -97,7 +112,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { } } isText = !isBinary - isLinedText = isText && numLines >= 4 && maxLineLen <= 256 + isLinedText = isText && numLines >= 4 && maxLineLen <= 1024 } // Format the string into printable records. @@ -117,6 +132,83 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { }, ) delim = "\n" + + // If possible, use a custom triple-quote (""") syntax for printing + // differences in a string literal. This format is more readable, + // but has edge-cases where differences are visually indistinguishable. + // This format is avoided under the following conditions: + // • A line starts with `"""` + // • A line starts with "..." + // • A line contains non-printable characters + // • Adjacent different lines differ only by whitespace + // + // For example: + // """ + // ... 
// 3 identical lines + // foo + // bar + // - baz + // + BAZ + // """ + isTripleQuoted := true + prevRemoveLines := map[string]bool{} + prevInsertLines := map[string]bool{} + var list2 textList + list2 = append(list2, textRecord{Value: textLine(`"""`), ElideComma: true}) + for _, r := range list { + if !r.Value.Equal(textEllipsis) { + line, _ := strconv.Unquote(string(r.Value.(textLine))) + line = strings.TrimPrefix(strings.TrimSuffix(line, "\r"), "\r") // trim leading/trailing carriage returns for legacy Windows endline support + normLine := strings.Map(func(r rune) rune { + if unicode.IsSpace(r) { + return -1 // drop whitespace to avoid visually indistinguishable output + } + return r + }, line) + isPrintable := func(r rune) bool { + return unicode.IsPrint(r) || r == '\t' // specially treat tab as printable + } + isTripleQuoted = !strings.HasPrefix(line, `"""`) && !strings.HasPrefix(line, "...") && strings.TrimFunc(line, isPrintable) == "" + switch r.Diff { + case diffRemoved: + isTripleQuoted = isTripleQuoted && !prevInsertLines[normLine] + prevRemoveLines[normLine] = true + case diffInserted: + isTripleQuoted = isTripleQuoted && !prevRemoveLines[normLine] + prevInsertLines[normLine] = true + } + if !isTripleQuoted { + break + } + r.Value = textLine(line) + r.ElideComma = true + } + if !(r.Diff == diffRemoved || r.Diff == diffInserted) { // start a new non-adjacent difference group + prevRemoveLines = map[string]bool{} + prevInsertLines = map[string]bool{} + } + list2 = append(list2, r) + } + if r := list2[len(list2)-1]; r.Diff == diffIdentical && len(r.Value.(textLine)) == 0 { + list2 = list2[:len(list2)-1] // elide single empty line at the end + } + list2 = append(list2, textRecord{Value: textLine(`"""`), ElideComma: true}) + if isTripleQuoted { + var out textNode = &textWrap{Prefix: "(", Value: list2, Suffix: ")"} + switch t.Kind() { + case reflect.String: + if t != reflect.TypeOf(string("")) { + out = opts.FormatType(t, out) + } + case reflect.Slice: + // Always emit type for slices since the triple-quote syntax + // looks like a string (not a slice). + opts = opts.WithTypeMode(emitType) + out = opts.FormatType(t, out) + } + return out + } + // If the text appears to be single-lined text, // then perform differencing in approximately fixed-sized chunks. // The output is printed as quoted strings. @@ -129,6 +221,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { }, ) delim = "" + // If the text appears to be binary data, // then perform differencing in approximately fixed-sized chunks. // The output is inspired by hexdump. @@ -145,6 +238,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { return textRecord{Diff: d, Value: textLine(s), Comment: comment} }, ) + // For all other slices of primitive types, // then perform differencing in approximately fixed-sized chunks. // The size of each chunk depends on the width of the element kind. 
@@ -172,7 +266,9 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { switch t.Elem().Kind() { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: ss = append(ss, fmt.Sprint(v.Index(i).Int())) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: + ss = append(ss, fmt.Sprint(v.Index(i).Uint())) + case reflect.Uint8, reflect.Uintptr: ss = append(ss, formatHex(v.Index(i).Uint())) case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128: ss = append(ss, fmt.Sprint(v.Index(i).Interface())) @@ -185,7 +281,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { } // Wrap the output with appropriate type information. - var out textNode = textWrap{"{", list, "}"} + var out textNode = &textWrap{Prefix: "{", Value: list, Suffix: "}"} if !isText { // The "{...}" byte-sequence literal is not valid Go syntax for strings. // Emit the type for extra clarity (e.g. "string{...}"). @@ -196,12 +292,12 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { } switch t.Kind() { case reflect.String: - out = textWrap{"strings.Join(", out, fmt.Sprintf(", %q)", delim)} + out = &textWrap{Prefix: "strings.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)} if t != reflect.TypeOf(string("")) { out = opts.FormatType(t, out) } case reflect.Slice: - out = textWrap{"bytes.Join(", out, fmt.Sprintf(", %q)", delim)} + out = &textWrap{Prefix: "bytes.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)} if t != reflect.TypeOf([]byte(nil)) { out = opts.FormatType(t, out) } @@ -242,9 +338,22 @@ func (opts formatOptions) formatDiffSlice( return n0 - v.Len() } + var numDiffs int + maxLen := -1 + if opts.LimitVerbosity { + maxLen = (1 << opts.verbosity()) << 2 // 4, 8, 16, 32, 64, etc... + opts.VerbosityLevel-- + } + groups := coalesceAdjacentEdits(name, es) groups = coalesceInterveningIdentical(groups, chunkSize/4) + maxGroup := diffStats{Name: name} for i, ds := range groups { + if maxLen >= 0 && numDiffs >= maxLen { + maxGroup = maxGroup.Append(ds) + continue + } + // Print equal. if ds.NumDiff() == 0 { // Compute the number of leading and trailing equal bytes to print. @@ -273,12 +382,18 @@ func (opts formatOptions) formatDiffSlice( } // Print unequal. 
+ len0 := len(list) nx := appendChunks(vx.Slice(0, ds.NumIdentical+ds.NumRemoved+ds.NumModified), diffRemoved) vx = vx.Slice(nx, vx.Len()) ny := appendChunks(vy.Slice(0, ds.NumIdentical+ds.NumInserted+ds.NumModified), diffInserted) vy = vy.Slice(ny, vy.Len()) + numDiffs += len(list) - len0 + } + if maxGroup.IsZero() { + assert(vx.Len() == 0 && vy.Len() == 0) + } else { + list.AppendEllipsis(maxGroup) } - assert(vx.Len() == 0 && vy.Len() == 0) return list } diff --git a/vendor/github.com/google/go-cmp/cmp/report_text.go b/vendor/github.com/google/go-cmp/cmp/report_text.go index 8b8fcab7b..8b12c05cd 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_text.go +++ b/vendor/github.com/google/go-cmp/cmp/report_text.go @@ -10,12 +10,15 @@ import ( "math/rand" "strings" "time" + "unicode/utf8" "github.com/google/go-cmp/cmp/internal/flags" ) var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0 +const maxColumnLength = 80 + type indentMode int func (n indentMode) appendIndent(b []byte, d diffMode) []byte { @@ -91,21 +94,22 @@ type textNode interface { // textWrap is a wrapper that concatenates a prefix and/or a suffix // to the underlying node. type textWrap struct { - Prefix string // e.g., "bytes.Buffer{" - Value textNode // textWrap | textList | textLine - Suffix string // e.g., "}" + Prefix string // e.g., "bytes.Buffer{" + Value textNode // textWrap | textList | textLine + Suffix string // e.g., "}" + Metadata interface{} // arbitrary metadata; has no effect on formatting } -func (s textWrap) Len() int { +func (s *textWrap) Len() int { return len(s.Prefix) + s.Value.Len() + len(s.Suffix) } -func (s1 textWrap) Equal(s2 textNode) bool { - if s2, ok := s2.(textWrap); ok { +func (s1 *textWrap) Equal(s2 textNode) bool { + if s2, ok := s2.(*textWrap); ok { return s1.Prefix == s2.Prefix && s1.Value.Equal(s2.Value) && s1.Suffix == s2.Suffix } return false } -func (s textWrap) String() string { +func (s *textWrap) String() string { var d diffMode var n indentMode _, s2 := s.formatCompactTo(nil, d) @@ -114,7 +118,7 @@ func (s textWrap) String() string { b = append(b, '\n') // Trailing newline return string(b) } -func (s textWrap) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { +func (s *textWrap) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { n0 := len(b) // Original buffer length b = append(b, s.Prefix...) b, s.Value = s.Value.formatCompactTo(b, d) @@ -124,7 +128,7 @@ func (s textWrap) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { } return b, s } -func (s textWrap) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte { +func (s *textWrap) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte { b = append(b, s.Prefix...) b = s.Value.formatExpandedTo(b, d, n) b = append(b, s.Suffix...) @@ -136,22 +140,23 @@ func (s textWrap) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte { // of the textList.formatCompactTo method. type textList []textRecord type textRecord struct { - Diff diffMode // e.g., 0 or '-' or '+' - Key string // e.g., "MyField" - Value textNode // textWrap | textLine - Comment fmt.Stringer // e.g., "6 identical fields" + Diff diffMode // e.g., 0 or '-' or '+' + Key string // e.g., "MyField" + Value textNode // textWrap | textLine + ElideComma bool // avoid trailing comma + Comment fmt.Stringer // e.g., "6 identical fields" } // AppendEllipsis appends a new ellipsis node to the list if none already // exists at the end. If cs is non-zero it coalesces the statistics with the // previous diffStats. 
func (s *textList) AppendEllipsis(ds diffStats) { - hasStats := ds != diffStats{} + hasStats := !ds.IsZero() if len(*s) == 0 || !(*s)[len(*s)-1].Value.Equal(textEllipsis) { if hasStats { - *s = append(*s, textRecord{Value: textEllipsis, Comment: ds}) + *s = append(*s, textRecord{Value: textEllipsis, ElideComma: true, Comment: ds}) } else { - *s = append(*s, textRecord{Value: textEllipsis}) + *s = append(*s, textRecord{Value: textEllipsis, ElideComma: true}) } return } @@ -191,7 +196,7 @@ func (s1 textList) Equal(s2 textNode) bool { } func (s textList) String() string { - return textWrap{"{", s, "}"}.String() + return (&textWrap{Prefix: "{", Value: s, Suffix: "}"}).String() } func (s textList) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { @@ -221,7 +226,7 @@ func (s textList) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { } // Force multi-lined output when printing a removed/inserted node that // is sufficiently long. - if (d == diffInserted || d == diffRemoved) && len(b[n0:]) > 80 { + if (d == diffInserted || d == diffRemoved) && len(b[n0:]) > maxColumnLength { multiLine = true } if !multiLine { @@ -236,16 +241,50 @@ func (s textList) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte { _, isLine := r.Value.(textLine) return r.Key == "" || !isLine }, - func(r textRecord) int { return len(r.Key) }, + func(r textRecord) int { return utf8.RuneCountInString(r.Key) }, ) alignValueLens := s.alignLens( func(r textRecord) bool { _, isLine := r.Value.(textLine) return !isLine || r.Value.Equal(textEllipsis) || r.Comment == nil }, - func(r textRecord) int { return len(r.Value.(textLine)) }, + func(r textRecord) int { return utf8.RuneCount(r.Value.(textLine)) }, ) + // Format lists of simple lists in a batched form. + // If the list is sequence of only textLine values, + // then batch multiple values on a single line. + var isSimple bool + for _, r := range s { + _, isLine := r.Value.(textLine) + isSimple = r.Diff == 0 && r.Key == "" && isLine && r.Comment == nil + if !isSimple { + break + } + } + if isSimple { + n++ + var batch []byte + emitBatch := func() { + if len(batch) > 0 { + b = n.appendIndent(append(b, '\n'), d) + b = append(b, bytes.TrimRight(batch, " ")...) + batch = batch[:0] + } + } + for _, r := range s { + line := r.Value.(textLine) + if len(batch)+len(line)+len(", ") > maxColumnLength { + emitBatch() + } + batch = append(batch, line...) + batch = append(batch, ", "...) + } + emitBatch() + n-- + return n.appendIndent(append(b, '\n'), d) + } + // Format the list as a multi-lined output. n++ for i, r := range s { @@ -256,7 +295,7 @@ func (s textList) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte { b = alignKeyLens[i].appendChar(b, ' ') b = r.Value.formatExpandedTo(b, d|r.Diff, n) - if !r.Value.Equal(textEllipsis) { + if !r.ElideComma { b = append(b, ',') } b = alignValueLens[i].appendChar(b, ' ') @@ -332,6 +371,11 @@ type diffStats struct { NumModified int } +func (s diffStats) IsZero() bool { + s.Name = "" + return s == diffStats{} +} + func (s diffStats) NumDiff() int { return s.NumRemoved + s.NumInserted + s.NumModified } diff --git a/vendor/github.com/google/gofuzz/README.md b/vendor/github.com/google/gofuzz/README.md index 64869af34..386c2a457 100644 --- a/vendor/github.com/google/gofuzz/README.md +++ b/vendor/github.com/google/gofuzz/README.md @@ -3,7 +3,7 @@ gofuzz gofuzz is a library for populating go objects with random values. 
-[![GoDoc](https://godoc.org/github.com/google/gofuzz?status.png)](https://godoc.org/github.com/google/gofuzz) +[![GoDoc](https://godoc.org/github.com/google/gofuzz?status.svg)](https://godoc.org/github.com/google/gofuzz) [![Travis](https://travis-ci.org/google/gofuzz.svg?branch=master)](https://travis-ci.org/google/gofuzz) This is useful for testing: diff --git a/vendor/github.com/google/gofuzz/fuzz.go b/vendor/github.com/google/gofuzz/fuzz.go index 1dfa80a6f..da0a5f938 100644 --- a/vendor/github.com/google/gofuzz/fuzz.go +++ b/vendor/github.com/google/gofuzz/fuzz.go @@ -20,6 +20,7 @@ import ( "fmt" "math/rand" "reflect" + "regexp" "time" ) @@ -28,13 +29,14 @@ type fuzzFuncMap map[reflect.Type]reflect.Value // Fuzzer knows how to fill any object with random fields. type Fuzzer struct { - fuzzFuncs fuzzFuncMap - defaultFuzzFuncs fuzzFuncMap - r *rand.Rand - nilChance float64 - minElements int - maxElements int - maxDepth int + fuzzFuncs fuzzFuncMap + defaultFuzzFuncs fuzzFuncMap + r *rand.Rand + nilChance float64 + minElements int + maxElements int + maxDepth int + skipFieldPatterns []*regexp.Regexp } // New returns a new Fuzzer. Customize your Fuzzer further by calling Funcs, @@ -150,6 +152,13 @@ func (f *Fuzzer) MaxDepth(d int) *Fuzzer { return f } +// Skip fields which match the supplied pattern. Call this multiple times if needed +// This is useful to skip XXX_ fields generated by protobuf +func (f *Fuzzer) SkipFieldsWithPattern(pattern *regexp.Regexp) *Fuzzer { + f.skipFieldPatterns = append(f.skipFieldPatterns, pattern) + return f +} + // Fuzz recursively fills all of obj's fields with something random. First // this tries to find a custom fuzz function (see Funcs). If there is no // custom function this tests whether the object implements fuzz.Interface and, @@ -274,7 +283,17 @@ func (fc *fuzzerContext) doFuzz(v reflect.Value, flags uint64) { v.Set(reflect.Zero(v.Type())) case reflect.Struct: for i := 0; i < v.NumField(); i++ { - fc.doFuzz(v.Field(i), 0) + skipField := false + fieldName := v.Type().Field(i).Name + for _, pattern := range fc.fuzzer.skipFieldPatterns { + if pattern.MatchString(fieldName) { + skipField = true + break + } + } + if !skipField { + fc.doFuzz(v.Field(i), 0) + } } case reflect.Chan: fallthrough diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go index 4fd44c45e..af6c0eee0 100644 --- a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go +++ b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go @@ -1,4 +1,4 @@ -// Copyright 2017 Google Inc. All Rights Reserved. +// Copyright 2020 Google LLC. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -19,7 +19,7 @@ package openapi_v2 import ( "fmt" "github.com/googleapis/gnostic/compiler" - "gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" "regexp" "strings" ) @@ -30,7 +30,7 @@ func Version() string { } // NewAdditionalPropertiesItem creates an object of type AdditionalPropertiesItem if possible, returning an error if not. 
-func NewAdditionalPropertiesItem(in interface{}, context *compiler.Context) (*AdditionalPropertiesItem, error) { +func NewAdditionalPropertiesItem(in *yaml.Node, context *compiler.Context) (*AdditionalPropertiesItem, error) { errors := make([]error, 0) x := &AdditionalPropertiesItem{} matched := false @@ -49,9 +49,10 @@ func NewAdditionalPropertiesItem(in interface{}, context *compiler.Context) (*Ad } } // bool boolean = 2; - boolValue, ok := in.(bool) + boolValue, ok := compiler.BoolForScalarNode(in) if ok { x.Oneof = &AdditionalPropertiesItem_Boolean{Boolean: boolValue} + matched = true } if matched { // since the oneof matched one of its possibilities, discard any matching errors @@ -61,16 +62,16 @@ func NewAdditionalPropertiesItem(in interface{}, context *compiler.Context) (*Ad } // NewAny creates an object of type Any if possible, returning an error if not. -func NewAny(in interface{}, context *compiler.Context) (*Any, error) { +func NewAny(in *yaml.Node, context *compiler.Context) (*Any, error) { errors := make([]error, 0) x := &Any{} - bytes, _ := yaml.Marshal(in) + bytes := compiler.Marshal(in) x.Yaml = string(bytes) return x, compiler.NewErrorGroupOrNil(errors) } // NewApiKeySecurity creates an object of type ApiKeySecurity if possible, returning an error if not. -func NewApiKeySecurity(in interface{}, context *compiler.Context) (*ApiKeySecurity, error) { +func NewApiKeySecurity(in *yaml.Node, context *compiler.Context) (*ApiKeySecurity, error) { errors := make([]error, 0) x := &ApiKeySecurity{} m, ok := compiler.UnpackMap(in) @@ -94,68 +95,68 @@ func NewApiKeySecurity(in interface{}, context *compiler.Context) (*ApiKeySecuri // string type = 1; v1 := compiler.MapValueForKey(m, "type") if v1 != nil { - x.Type, ok = v1.(string) + x.Type, ok = compiler.StringForScalarNode(v1) if !ok { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) errors = append(errors, compiler.NewError(context, message)) } // check for valid enum values // [apiKey] if ok && !compiler.StringArrayContainsValue([]string{"apiKey"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) errors = append(errors, compiler.NewError(context, message)) } } // string name = 2; v2 := compiler.MapValueForKey(m, "name") if v2 != nil { - x.Name, ok = v2.(string) + x.Name, ok = compiler.StringForScalarNode(v2) if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v2, v2) + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v2)) errors = append(errors, compiler.NewError(context, message)) } } // string in = 3; v3 := compiler.MapValueForKey(m, "in") if v3 != nil { - x.In, ok = v3.(string) + x.In, ok = compiler.StringForScalarNode(v3) if !ok { - message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v3, v3) + message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v3)) errors = append(errors, compiler.NewError(context, message)) } // check for valid enum values // [header query] if ok && !compiler.StringArrayContainsValue([]string{"header", "query"}, x.In) { - message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v3, v3) + message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v3)) errors = append(errors, compiler.NewError(context, message)) } } // string description = 4; v4 := 
compiler.MapValueForKey(m, "description") if v4 != nil { - x.Description, ok = v4.(string) + x.Description, ok = compiler.StringForScalarNode(v4) if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v4, v4) + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v4)) errors = append(errors, compiler.NewError(context, message)) } } // repeated NamedAny vendor_extension = 5; // MAP: Any ^x- x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) if ok { - v := item.Value + v := m.Content[i+1] if strings.HasPrefix(k, "x-") { pair := &NamedAny{} pair.Name = k result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + handled, resultFromExt, err := compiler.CallExtension(context, v, k) if handled { if err != nil { errors = append(errors, err) } else { - bytes, _ := yaml.Marshal(v) + bytes := compiler.Marshal(v) result.Yaml = string(bytes) result.Value = resultFromExt pair.Value = result @@ -175,7 +176,7 @@ func NewApiKeySecurity(in interface{}, context *compiler.Context) (*ApiKeySecuri } // NewBasicAuthenticationSecurity creates an object of type BasicAuthenticationSecurity if possible, returning an error if not. -func NewBasicAuthenticationSecurity(in interface{}, context *compiler.Context) (*BasicAuthenticationSecurity, error) { +func NewBasicAuthenticationSecurity(in *yaml.Node, context *compiler.Context) (*BasicAuthenticationSecurity, error) { errors := make([]error, 0) x := &BasicAuthenticationSecurity{} m, ok := compiler.UnpackMap(in) @@ -199,44 +200,44 @@ func NewBasicAuthenticationSecurity(in interface{}, context *compiler.Context) ( // string type = 1; v1 := compiler.MapValueForKey(m, "type") if v1 != nil { - x.Type, ok = v1.(string) + x.Type, ok = compiler.StringForScalarNode(v1) if !ok { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) errors = append(errors, compiler.NewError(context, message)) } // check for valid enum values // [basic] if ok && !compiler.StringArrayContainsValue([]string{"basic"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) errors = append(errors, compiler.NewError(context, message)) } } // string description = 2; v2 := compiler.MapValueForKey(m, "description") if v2 != nil { - x.Description, ok = v2.(string) + x.Description, ok = compiler.StringForScalarNode(v2) if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v2, v2) + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v2)) errors = append(errors, compiler.NewError(context, message)) } } // repeated NamedAny vendor_extension = 3; // MAP: Any ^x- x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) if ok { - v := item.Value + v := m.Content[i+1] if strings.HasPrefix(k, "x-") { pair := &NamedAny{} pair.Name = k result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + handled, resultFromExt, err := compiler.CallExtension(context, v, k) if handled { if err != nil { errors = append(errors, 
err) } else { - bytes, _ := yaml.Marshal(v) + bytes := compiler.Marshal(v) result.Yaml = string(bytes) result.Value = resultFromExt pair.Value = result @@ -256,7 +257,7 @@ func NewBasicAuthenticationSecurity(in interface{}, context *compiler.Context) ( } // NewBodyParameter creates an object of type BodyParameter if possible, returning an error if not. -func NewBodyParameter(in interface{}, context *compiler.Context) (*BodyParameter, error) { +func NewBodyParameter(in *yaml.Node, context *compiler.Context) (*BodyParameter, error) { errors := make([]error, 0) x := &BodyParameter{} m, ok := compiler.UnpackMap(in) @@ -280,42 +281,42 @@ func NewBodyParameter(in interface{}, context *compiler.Context) (*BodyParameter // string description = 1; v1 := compiler.MapValueForKey(m, "description") if v1 != nil { - x.Description, ok = v1.(string) + x.Description, ok = compiler.StringForScalarNode(v1) if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v1, v1) + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v1)) errors = append(errors, compiler.NewError(context, message)) } } // string name = 2; v2 := compiler.MapValueForKey(m, "name") if v2 != nil { - x.Name, ok = v2.(string) + x.Name, ok = compiler.StringForScalarNode(v2) if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v2, v2) + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v2)) errors = append(errors, compiler.NewError(context, message)) } } // string in = 3; v3 := compiler.MapValueForKey(m, "in") if v3 != nil { - x.In, ok = v3.(string) + x.In, ok = compiler.StringForScalarNode(v3) if !ok { - message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v3, v3) + message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v3)) errors = append(errors, compiler.NewError(context, message)) } // check for valid enum values // [body] if ok && !compiler.StringArrayContainsValue([]string{"body"}, x.In) { - message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v3, v3) + message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v3)) errors = append(errors, compiler.NewError(context, message)) } } // bool required = 4; v4 := compiler.MapValueForKey(m, "required") if v4 != nil { - x.Required, ok = v4.(bool) + x.Required, ok = compiler.BoolForScalarNode(v4) if !ok { - message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v4, v4) + message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v4)) errors = append(errors, compiler.NewError(context, message)) } } @@ -331,20 +332,20 @@ func NewBodyParameter(in interface{}, context *compiler.Context) (*BodyParameter // repeated NamedAny vendor_extension = 6; // MAP: Any ^x- x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) if ok { - v := item.Value + v := m.Content[i+1] if strings.HasPrefix(k, "x-") { pair := &NamedAny{} pair.Name = k result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + handled, resultFromExt, err := compiler.CallExtension(context, v, k) if handled { if err != nil { errors = append(errors, err) } else { - bytes, _ := yaml.Marshal(v) + bytes := compiler.Marshal(v) result.Yaml = string(bytes) result.Value = resultFromExt pair.Value = result @@ -364,7 +365,7 @@ func NewBodyParameter(in interface{}, context 
*compiler.Context) (*BodyParameter } // NewContact creates an object of type Contact if possible, returning an error if not. -func NewContact(in interface{}, context *compiler.Context) (*Contact, error) { +func NewContact(in *yaml.Node, context *compiler.Context) (*Contact, error) { errors := make([]error, 0) x := &Contact{} m, ok := compiler.UnpackMap(in) @@ -382,47 +383,47 @@ func NewContact(in interface{}, context *compiler.Context) (*Contact, error) { // string name = 1; v1 := compiler.MapValueForKey(m, "name") if v1 != nil { - x.Name, ok = v1.(string) + x.Name, ok = compiler.StringForScalarNode(v1) if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) errors = append(errors, compiler.NewError(context, message)) } } // string url = 2; v2 := compiler.MapValueForKey(m, "url") if v2 != nil { - x.Url, ok = v2.(string) + x.Url, ok = compiler.StringForScalarNode(v2) if !ok { - message := fmt.Sprintf("has unexpected value for url: %+v (%T)", v2, v2) + message := fmt.Sprintf("has unexpected value for url: %s", compiler.Display(v2)) errors = append(errors, compiler.NewError(context, message)) } } // string email = 3; v3 := compiler.MapValueForKey(m, "email") if v3 != nil { - x.Email, ok = v3.(string) + x.Email, ok = compiler.StringForScalarNode(v3) if !ok { - message := fmt.Sprintf("has unexpected value for email: %+v (%T)", v3, v3) + message := fmt.Sprintf("has unexpected value for email: %s", compiler.Display(v3)) errors = append(errors, compiler.NewError(context, message)) } } // repeated NamedAny vendor_extension = 4; // MAP: Any ^x- x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) if ok { - v := item.Value + v := m.Content[i+1] if strings.HasPrefix(k, "x-") { pair := &NamedAny{} pair.Name = k result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + handled, resultFromExt, err := compiler.CallExtension(context, v, k) if handled { if err != nil { errors = append(errors, err) } else { - bytes, _ := yaml.Marshal(v) + bytes := compiler.Marshal(v) result.Yaml = string(bytes) result.Value = resultFromExt pair.Value = result @@ -442,7 +443,7 @@ func NewContact(in interface{}, context *compiler.Context) (*Contact, error) { } // NewDefault creates an object of type Default if possible, returning an error if not. 
-func NewDefault(in interface{}, context *compiler.Context) (*Default, error) { +func NewDefault(in *yaml.Node, context *compiler.Context) (*Default, error) { errors := make([]error, 0) x := &Default{} m, ok := compiler.UnpackMap(in) @@ -453,19 +454,19 @@ func NewDefault(in interface{}, context *compiler.Context) (*Default, error) { // repeated NamedAny additional_properties = 1; // MAP: Any x.AdditionalProperties = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) if ok { - v := item.Value + v := m.Content[i+1] pair := &NamedAny{} pair.Name = k result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + handled, resultFromExt, err := compiler.CallExtension(context, v, k) if handled { if err != nil { errors = append(errors, err) } else { - bytes, _ := yaml.Marshal(v) + bytes := compiler.Marshal(v) result.Yaml = string(bytes) result.Value = resultFromExt pair.Value = result @@ -484,7 +485,7 @@ func NewDefault(in interface{}, context *compiler.Context) (*Default, error) { } // NewDefinitions creates an object of type Definitions if possible, returning an error if not. -func NewDefinitions(in interface{}, context *compiler.Context) (*Definitions, error) { +func NewDefinitions(in *yaml.Node, context *compiler.Context) (*Definitions, error) { errors := make([]error, 0) x := &Definitions{} m, ok := compiler.UnpackMap(in) @@ -495,10 +496,10 @@ func NewDefinitions(in interface{}, context *compiler.Context) (*Definitions, er // repeated NamedSchema additional_properties = 1; // MAP: Schema x.AdditionalProperties = make([]*NamedSchema, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) if ok { - v := item.Value + v := m.Content[i+1] pair := &NamedSchema{} pair.Name = k var err error @@ -514,7 +515,7 @@ func NewDefinitions(in interface{}, context *compiler.Context) (*Definitions, er } // NewDocument creates an object of type Document if possible, returning an error if not. 
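Illustrative sketch, not part of the vendored diff above: the recurring change in these hunks replaces iteration over a yaml.v2 map/MapSlice with walking a yaml.v3 mapping node, whose Content slice stores keys and values as alternating entries. A minimal standalone example using only gopkg.in/yaml.v3 (all names below are hypothetical):

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	src := []byte("name: example\nx-vendor: custom\nrequired: true\n")

	// Unmarshal into a yaml.Node instead of interface{}.
	var doc yaml.Node
	if err := yaml.Unmarshal(src, &doc); err != nil {
		panic(err)
	}
	// The document node wraps the top-level mapping node.
	m := doc.Content[0]

	// Content[i] is a key node, Content[i+1] is its value node.
	for i := 0; i < len(m.Content); i += 2 {
		k := m.Content[i]
		v := m.Content[i+1]
		fmt.Printf("%s -> %s (%s)\n", k.Value, v.Value, v.Tag)
	}
}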
-func NewDocument(in interface{}, context *compiler.Context) (*Document, error) { +func NewDocument(in *yaml.Node, context *compiler.Context) (*Document, error) { errors := make([]error, 0) x := &Document{} m, ok := compiler.UnpackMap(in) @@ -538,15 +539,15 @@ func NewDocument(in interface{}, context *compiler.Context) (*Document, error) { // string swagger = 1; v1 := compiler.MapValueForKey(m, "swagger") if v1 != nil { - x.Swagger, ok = v1.(string) + x.Swagger, ok = compiler.StringForScalarNode(v1) if !ok { - message := fmt.Sprintf("has unexpected value for swagger: %+v (%T)", v1, v1) + message := fmt.Sprintf("has unexpected value for swagger: %s", compiler.Display(v1)) errors = append(errors, compiler.NewError(context, message)) } // check for valid enum values // [2.0] if ok && !compiler.StringArrayContainsValue([]string{"2.0"}, x.Swagger) { - message := fmt.Sprintf("has unexpected value for swagger: %+v (%T)", v1, v1) + message := fmt.Sprintf("has unexpected value for swagger: %s", compiler.Display(v1)) errors = append(errors, compiler.NewError(context, message)) } } @@ -562,57 +563,57 @@ func NewDocument(in interface{}, context *compiler.Context) (*Document, error) { // string host = 3; v3 := compiler.MapValueForKey(m, "host") if v3 != nil { - x.Host, ok = v3.(string) + x.Host, ok = compiler.StringForScalarNode(v3) if !ok { - message := fmt.Sprintf("has unexpected value for host: %+v (%T)", v3, v3) + message := fmt.Sprintf("has unexpected value for host: %s", compiler.Display(v3)) errors = append(errors, compiler.NewError(context, message)) } } // string base_path = 4; v4 := compiler.MapValueForKey(m, "basePath") if v4 != nil { - x.BasePath, ok = v4.(string) + x.BasePath, ok = compiler.StringForScalarNode(v4) if !ok { - message := fmt.Sprintf("has unexpected value for basePath: %+v (%T)", v4, v4) + message := fmt.Sprintf("has unexpected value for basePath: %s", compiler.Display(v4)) errors = append(errors, compiler.NewError(context, message)) } } // repeated string schemes = 5; v5 := compiler.MapValueForKey(m, "schemes") if v5 != nil { - v, ok := v5.([]interface{}) + v, ok := compiler.SequenceNodeForNode(v5) if ok { - x.Schemes = compiler.ConvertInterfaceArrayToStringArray(v) + x.Schemes = compiler.StringArrayForSequenceNode(v) } else { - message := fmt.Sprintf("has unexpected value for schemes: %+v (%T)", v5, v5) + message := fmt.Sprintf("has unexpected value for schemes: %s", compiler.Display(v5)) errors = append(errors, compiler.NewError(context, message)) } // check for valid enum values // [http https ws wss] if ok && !compiler.StringArrayContainsValues([]string{"http", "https", "ws", "wss"}, x.Schemes) { - message := fmt.Sprintf("has unexpected value for schemes: %+v", v5) + message := fmt.Sprintf("has unexpected value for schemes: %s", compiler.Display(v5)) errors = append(errors, compiler.NewError(context, message)) } } // repeated string consumes = 6; v6 := compiler.MapValueForKey(m, "consumes") if v6 != nil { - v, ok := v6.([]interface{}) + v, ok := compiler.SequenceNodeForNode(v6) if ok { - x.Consumes = compiler.ConvertInterfaceArrayToStringArray(v) + x.Consumes = compiler.StringArrayForSequenceNode(v) } else { - message := fmt.Sprintf("has unexpected value for consumes: %+v (%T)", v6, v6) + message := fmt.Sprintf("has unexpected value for consumes: %s", compiler.Display(v6)) errors = append(errors, compiler.NewError(context, message)) } } // repeated string produces = 7; v7 := compiler.MapValueForKey(m, "produces") if v7 != nil { - v, ok := v7.([]interface{}) + v, ok := 
compiler.SequenceNodeForNode(v7) if ok { - x.Produces = compiler.ConvertInterfaceArrayToStringArray(v) + x.Produces = compiler.StringArrayForSequenceNode(v) } else { - message := fmt.Sprintf("has unexpected value for produces: %+v (%T)", v7, v7) + message := fmt.Sprintf("has unexpected value for produces: %s", compiler.Display(v7)) errors = append(errors, compiler.NewError(context, message)) } } @@ -657,9 +658,9 @@ func NewDocument(in interface{}, context *compiler.Context) (*Document, error) { if v12 != nil { // repeated SecurityRequirement x.Security = make([]*SecurityRequirement, 0) - a, ok := v12.([]interface{}) + a, ok := compiler.SequenceNodeForNode(v12) if ok { - for _, item := range a { + for _, item := range a.Content { y, err := NewSecurityRequirement(item, compiler.NewContext("security", context)) if err != nil { errors = append(errors, err) @@ -682,9 +683,9 @@ func NewDocument(in interface{}, context *compiler.Context) (*Document, error) { if v14 != nil { // repeated Tag x.Tags = make([]*Tag, 0) - a, ok := v14.([]interface{}) + a, ok := compiler.SequenceNodeForNode(v14) if ok { - for _, item := range a { + for _, item := range a.Content { y, err := NewTag(item, compiler.NewContext("tags", context)) if err != nil { errors = append(errors, err) @@ -705,20 +706,20 @@ func NewDocument(in interface{}, context *compiler.Context) (*Document, error) { // repeated NamedAny vendor_extension = 16; // MAP: Any ^x- x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) if ok { - v := item.Value + v := m.Content[i+1] if strings.HasPrefix(k, "x-") { pair := &NamedAny{} pair.Name = k result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + handled, resultFromExt, err := compiler.CallExtension(context, v, k) if handled { if err != nil { errors = append(errors, err) } else { - bytes, _ := yaml.Marshal(v) + bytes := compiler.Marshal(v) result.Yaml = string(bytes) result.Value = resultFromExt pair.Value = result @@ -738,7 +739,7 @@ func NewDocument(in interface{}, context *compiler.Context) (*Document, error) { } // NewExamples creates an object of type Examples if possible, returning an error if not. 
-func NewExamples(in interface{}, context *compiler.Context) (*Examples, error) { +func NewExamples(in *yaml.Node, context *compiler.Context) (*Examples, error) { errors := make([]error, 0) x := &Examples{} m, ok := compiler.UnpackMap(in) @@ -749,19 +750,19 @@ func NewExamples(in interface{}, context *compiler.Context) (*Examples, error) { // repeated NamedAny additional_properties = 1; // MAP: Any x.AdditionalProperties = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) if ok { - v := item.Value + v := m.Content[i+1] pair := &NamedAny{} pair.Name = k result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + handled, resultFromExt, err := compiler.CallExtension(context, v, k) if handled { if err != nil { errors = append(errors, err) } else { - bytes, _ := yaml.Marshal(v) + bytes := compiler.Marshal(v) result.Yaml = string(bytes) result.Value = resultFromExt pair.Value = result @@ -780,7 +781,7 @@ func NewExamples(in interface{}, context *compiler.Context) (*Examples, error) { } // NewExternalDocs creates an object of type ExternalDocs if possible, returning an error if not. -func NewExternalDocs(in interface{}, context *compiler.Context) (*ExternalDocs, error) { +func NewExternalDocs(in *yaml.Node, context *compiler.Context) (*ExternalDocs, error) { errors := make([]error, 0) x := &ExternalDocs{} m, ok := compiler.UnpackMap(in) @@ -804,38 +805,38 @@ func NewExternalDocs(in interface{}, context *compiler.Context) (*ExternalDocs, // string description = 1; v1 := compiler.MapValueForKey(m, "description") if v1 != nil { - x.Description, ok = v1.(string) + x.Description, ok = compiler.StringForScalarNode(v1) if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v1, v1) + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v1)) errors = append(errors, compiler.NewError(context, message)) } } // string url = 2; v2 := compiler.MapValueForKey(m, "url") if v2 != nil { - x.Url, ok = v2.(string) + x.Url, ok = compiler.StringForScalarNode(v2) if !ok { - message := fmt.Sprintf("has unexpected value for url: %+v (%T)", v2, v2) + message := fmt.Sprintf("has unexpected value for url: %s", compiler.Display(v2)) errors = append(errors, compiler.NewError(context, message)) } } // repeated NamedAny vendor_extension = 3; // MAP: Any ^x- x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) if ok { - v := item.Value + v := m.Content[i+1] if strings.HasPrefix(k, "x-") { pair := &NamedAny{} pair.Name = k result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + handled, resultFromExt, err := compiler.CallExtension(context, v, k) if handled { if err != nil { errors = append(errors, err) } else { - bytes, _ := yaml.Marshal(v) + bytes := compiler.Marshal(v) result.Yaml = string(bytes) result.Value = resultFromExt pair.Value = result @@ -855,7 +856,7 @@ func NewExternalDocs(in interface{}, context *compiler.Context) (*ExternalDocs, } // NewFileSchema creates an object of type FileSchema if possible, returning an error if not. 
-func NewFileSchema(in interface{}, context *compiler.Context) (*FileSchema, error) { +func NewFileSchema(in *yaml.Node, context *compiler.Context) (*FileSchema, error) { errors := make([]error, 0) x := &FileSchema{} m, ok := compiler.UnpackMap(in) @@ -879,27 +880,27 @@ func NewFileSchema(in interface{}, context *compiler.Context) (*FileSchema, erro // string format = 1; v1 := compiler.MapValueForKey(m, "format") if v1 != nil { - x.Format, ok = v1.(string) + x.Format, ok = compiler.StringForScalarNode(v1) if !ok { - message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v1, v1) + message := fmt.Sprintf("has unexpected value for format: %s", compiler.Display(v1)) errors = append(errors, compiler.NewError(context, message)) } } // string title = 2; v2 := compiler.MapValueForKey(m, "title") if v2 != nil { - x.Title, ok = v2.(string) + x.Title, ok = compiler.StringForScalarNode(v2) if !ok { - message := fmt.Sprintf("has unexpected value for title: %+v (%T)", v2, v2) + message := fmt.Sprintf("has unexpected value for title: %s", compiler.Display(v2)) errors = append(errors, compiler.NewError(context, message)) } } // string description = 3; v3 := compiler.MapValueForKey(m, "description") if v3 != nil { - x.Description, ok = v3.(string) + x.Description, ok = compiler.StringForScalarNode(v3) if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v3)) errors = append(errors, compiler.NewError(context, message)) } } @@ -915,35 +916,35 @@ func NewFileSchema(in interface{}, context *compiler.Context) (*FileSchema, erro // repeated string required = 5; v5 := compiler.MapValueForKey(m, "required") if v5 != nil { - v, ok := v5.([]interface{}) + v, ok := compiler.SequenceNodeForNode(v5) if ok { - x.Required = compiler.ConvertInterfaceArrayToStringArray(v) + x.Required = compiler.StringArrayForSequenceNode(v) } else { - message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v5, v5) + message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v5)) errors = append(errors, compiler.NewError(context, message)) } } // string type = 6; v6 := compiler.MapValueForKey(m, "type") if v6 != nil { - x.Type, ok = v6.(string) + x.Type, ok = compiler.StringForScalarNode(v6) if !ok { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v6)) errors = append(errors, compiler.NewError(context, message)) } // check for valid enum values // [file] if ok && !compiler.StringArrayContainsValue([]string{"file"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v6)) errors = append(errors, compiler.NewError(context, message)) } } // bool read_only = 7; v7 := compiler.MapValueForKey(m, "readOnly") if v7 != nil { - x.ReadOnly, ok = v7.(bool) + x.ReadOnly, ok = compiler.BoolForScalarNode(v7) if !ok { - message := fmt.Sprintf("has unexpected value for readOnly: %+v (%T)", v7, v7) + message := fmt.Sprintf("has unexpected value for readOnly: %s", compiler.Display(v7)) errors = append(errors, compiler.NewError(context, message)) } } @@ -968,20 +969,20 @@ func NewFileSchema(in interface{}, context *compiler.Context) (*FileSchema, erro // repeated NamedAny vendor_extension = 10; // MAP: Any ^x- x.VendorExtension = make([]*NamedAny, 0) - for _, item := range 
m { - k, ok := compiler.StringValue(item.Key) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) if ok { - v := item.Value + v := m.Content[i+1] if strings.HasPrefix(k, "x-") { pair := &NamedAny{} pair.Name = k result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + handled, resultFromExt, err := compiler.CallExtension(context, v, k) if handled { if err != nil { errors = append(errors, err) } else { - bytes, _ := yaml.Marshal(v) + bytes := compiler.Marshal(v) result.Yaml = string(bytes) result.Value = resultFromExt pair.Value = result @@ -1001,7 +1002,7 @@ func NewFileSchema(in interface{}, context *compiler.Context) (*FileSchema, erro } // NewFormDataParameterSubSchema creates an object of type FormDataParameterSubSchema if possible, returning an error if not. -func NewFormDataParameterSubSchema(in interface{}, context *compiler.Context) (*FormDataParameterSubSchema, error) { +func NewFormDataParameterSubSchema(in *yaml.Node, context *compiler.Context) (*FormDataParameterSubSchema, error) { errors := make([]error, 0) x := &FormDataParameterSubSchema{} m, ok := compiler.UnpackMap(in) @@ -1019,75 +1020,75 @@ func NewFormDataParameterSubSchema(in interface{}, context *compiler.Context) (* // bool required = 1; v1 := compiler.MapValueForKey(m, "required") if v1 != nil { - x.Required, ok = v1.(bool) + x.Required, ok = compiler.BoolForScalarNode(v1) if !ok { - message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v1, v1) + message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v1)) errors = append(errors, compiler.NewError(context, message)) } } // string in = 2; v2 := compiler.MapValueForKey(m, "in") if v2 != nil { - x.In, ok = v2.(string) + x.In, ok = compiler.StringForScalarNode(v2) if !ok { - message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) + message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v2)) errors = append(errors, compiler.NewError(context, message)) } // check for valid enum values // [formData] if ok && !compiler.StringArrayContainsValue([]string{"formData"}, x.In) { - message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) + message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v2)) errors = append(errors, compiler.NewError(context, message)) } } // string description = 3; v3 := compiler.MapValueForKey(m, "description") if v3 != nil { - x.Description, ok = v3.(string) + x.Description, ok = compiler.StringForScalarNode(v3) if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v3)) errors = append(errors, compiler.NewError(context, message)) } } // string name = 4; v4 := compiler.MapValueForKey(m, "name") if v4 != nil { - x.Name, ok = v4.(string) + x.Name, ok = compiler.StringForScalarNode(v4) if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v4, v4) + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v4)) errors = append(errors, compiler.NewError(context, message)) } } // bool allow_empty_value = 5; v5 := compiler.MapValueForKey(m, "allowEmptyValue") if v5 != nil { - x.AllowEmptyValue, ok = v5.(bool) + x.AllowEmptyValue, ok = compiler.BoolForScalarNode(v5) if !ok { - message := fmt.Sprintf("has unexpected value for allowEmptyValue: %+v (%T)", v5, v5) + message := fmt.Sprintf("has unexpected value for 
allowEmptyValue: %s", compiler.Display(v5)) errors = append(errors, compiler.NewError(context, message)) } } // string type = 6; v6 := compiler.MapValueForKey(m, "type") if v6 != nil { - x.Type, ok = v6.(string) + x.Type, ok = compiler.StringForScalarNode(v6) if !ok { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v6)) errors = append(errors, compiler.NewError(context, message)) } // check for valid enum values // [string number boolean integer array file] if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array", "file"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v6)) errors = append(errors, compiler.NewError(context, message)) } } // string format = 7; v7 := compiler.MapValueForKey(m, "format") if v7 != nil { - x.Format, ok = v7.(string) + x.Format, ok = compiler.StringForScalarNode(v7) if !ok { - message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v7, v7) + message := fmt.Sprintf("has unexpected value for format: %s", compiler.Display(v7)) errors = append(errors, compiler.NewError(context, message)) } } @@ -1103,15 +1104,15 @@ func NewFormDataParameterSubSchema(in interface{}, context *compiler.Context) (* // string collection_format = 9; v9 := compiler.MapValueForKey(m, "collectionFormat") if v9 != nil { - x.CollectionFormat, ok = v9.(string) + x.CollectionFormat, ok = compiler.StringForScalarNode(v9) if !ok { - message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v9, v9) + message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v9)) errors = append(errors, compiler.NewError(context, message)) } // check for valid enum values // [csv ssv tsv pipes multi] if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes", "multi"}, x.CollectionFormat) { - message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v9, v9) + message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v9)) errors = append(errors, compiler.NewError(context, message)) } } @@ -1127,126 +1128,102 @@ func NewFormDataParameterSubSchema(in interface{}, context *compiler.Context) (* // float maximum = 11; v11 := compiler.MapValueForKey(m, "maximum") if v11 != nil { - switch v11 := v11.(type) { - case float64: - x.Maximum = v11 - case float32: - x.Maximum = float64(v11) - case uint64: - x.Maximum = float64(v11) - case uint32: - x.Maximum = float64(v11) - case int64: - x.Maximum = float64(v11) - case int32: - x.Maximum = float64(v11) - case int: - x.Maximum = float64(v11) - default: - message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v11, v11) + v, ok := compiler.FloatForScalarNode(v11) + if ok { + x.Maximum = v + } else { + message := fmt.Sprintf("has unexpected value for maximum: %s", compiler.Display(v11)) errors = append(errors, compiler.NewError(context, message)) } } // bool exclusive_maximum = 12; v12 := compiler.MapValueForKey(m, "exclusiveMaximum") if v12 != nil { - x.ExclusiveMaximum, ok = v12.(bool) + x.ExclusiveMaximum, ok = compiler.BoolForScalarNode(v12) if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v12, v12) + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %s", compiler.Display(v12)) errors = append(errors, 
compiler.NewError(context, message)) } } // float minimum = 13; v13 := compiler.MapValueForKey(m, "minimum") if v13 != nil { - switch v13 := v13.(type) { - case float64: - x.Minimum = v13 - case float32: - x.Minimum = float64(v13) - case uint64: - x.Minimum = float64(v13) - case uint32: - x.Minimum = float64(v13) - case int64: - x.Minimum = float64(v13) - case int32: - x.Minimum = float64(v13) - case int: - x.Minimum = float64(v13) - default: - message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v13, v13) + v, ok := compiler.FloatForScalarNode(v13) + if ok { + x.Minimum = v + } else { + message := fmt.Sprintf("has unexpected value for minimum: %s", compiler.Display(v13)) errors = append(errors, compiler.NewError(context, message)) } } // bool exclusive_minimum = 14; v14 := compiler.MapValueForKey(m, "exclusiveMinimum") if v14 != nil { - x.ExclusiveMinimum, ok = v14.(bool) + x.ExclusiveMinimum, ok = compiler.BoolForScalarNode(v14) if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v14, v14) + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %s", compiler.Display(v14)) errors = append(errors, compiler.NewError(context, message)) } } // int64 max_length = 15; v15 := compiler.MapValueForKey(m, "maxLength") if v15 != nil { - t, ok := v15.(int) + t, ok := compiler.IntForScalarNode(v15) if ok { x.MaxLength = int64(t) } else { - message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v15, v15) + message := fmt.Sprintf("has unexpected value for maxLength: %s", compiler.Display(v15)) errors = append(errors, compiler.NewError(context, message)) } } // int64 min_length = 16; v16 := compiler.MapValueForKey(m, "minLength") if v16 != nil { - t, ok := v16.(int) + t, ok := compiler.IntForScalarNode(v16) if ok { x.MinLength = int64(t) } else { - message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v16, v16) + message := fmt.Sprintf("has unexpected value for minLength: %s", compiler.Display(v16)) errors = append(errors, compiler.NewError(context, message)) } } // string pattern = 17; v17 := compiler.MapValueForKey(m, "pattern") if v17 != nil { - x.Pattern, ok = v17.(string) + x.Pattern, ok = compiler.StringForScalarNode(v17) if !ok { - message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v17, v17) + message := fmt.Sprintf("has unexpected value for pattern: %s", compiler.Display(v17)) errors = append(errors, compiler.NewError(context, message)) } } // int64 max_items = 18; v18 := compiler.MapValueForKey(m, "maxItems") if v18 != nil { - t, ok := v18.(int) + t, ok := compiler.IntForScalarNode(v18) if ok { x.MaxItems = int64(t) } else { - message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v18, v18) + message := fmt.Sprintf("has unexpected value for maxItems: %s", compiler.Display(v18)) errors = append(errors, compiler.NewError(context, message)) } } // int64 min_items = 19; v19 := compiler.MapValueForKey(m, "minItems") if v19 != nil { - t, ok := v19.(int) + t, ok := compiler.IntForScalarNode(v19) if ok { x.MinItems = int64(t) } else { - message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v19, v19) + message := fmt.Sprintf("has unexpected value for minItems: %s", compiler.Display(v19)) errors = append(errors, compiler.NewError(context, message)) } } // bool unique_items = 20; v20 := compiler.MapValueForKey(m, "uniqueItems") if v20 != nil { - x.UniqueItems, ok = v20.(bool) + x.UniqueItems, ok = compiler.BoolForScalarNode(v20) if !ok { - message := 
fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v20, v20) + message := fmt.Sprintf("has unexpected value for uniqueItems: %s", compiler.Display(v20)) errors = append(errors, compiler.NewError(context, message)) } } @@ -1255,9 +1232,9 @@ func NewFormDataParameterSubSchema(in interface{}, context *compiler.Context) (* if v21 != nil { // repeated Any x.Enum = make([]*Any, 0) - a, ok := v21.([]interface{}) + a, ok := compiler.SequenceNodeForNode(v21) if ok { - for _, item := range a { + for _, item := range a.Content { y, err := NewAny(item, compiler.NewContext("enum", context)) if err != nil { errors = append(errors, err) @@ -1269,43 +1246,31 @@ func NewFormDataParameterSubSchema(in interface{}, context *compiler.Context) (* // float multiple_of = 22; v22 := compiler.MapValueForKey(m, "multipleOf") if v22 != nil { - switch v22 := v22.(type) { - case float64: - x.MultipleOf = v22 - case float32: - x.MultipleOf = float64(v22) - case uint64: - x.MultipleOf = float64(v22) - case uint32: - x.MultipleOf = float64(v22) - case int64: - x.MultipleOf = float64(v22) - case int32: - x.MultipleOf = float64(v22) - case int: - x.MultipleOf = float64(v22) - default: - message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v22, v22) + v, ok := compiler.FloatForScalarNode(v22) + if ok { + x.MultipleOf = v + } else { + message := fmt.Sprintf("has unexpected value for multipleOf: %s", compiler.Display(v22)) errors = append(errors, compiler.NewError(context, message)) } } // repeated NamedAny vendor_extension = 23; // MAP: Any ^x- x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) if ok { - v := item.Value + v := m.Content[i+1] if strings.HasPrefix(k, "x-") { pair := &NamedAny{} pair.Name = k result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + handled, resultFromExt, err := compiler.CallExtension(context, v, k) if handled { if err != nil { errors = append(errors, err) } else { - bytes, _ := yaml.Marshal(v) + bytes := compiler.Marshal(v) result.Yaml = string(bytes) result.Value = resultFromExt pair.Value = result @@ -1325,7 +1290,7 @@ func NewFormDataParameterSubSchema(in interface{}, context *compiler.Context) (* } // NewHeader creates an object of type Header if possible, returning an error if not. 
-func NewHeader(in interface{}, context *compiler.Context) (*Header, error) { +func NewHeader(in *yaml.Node, context *compiler.Context) (*Header, error) { errors := make([]error, 0) x := &Header{} m, ok := compiler.UnpackMap(in) @@ -1349,24 +1314,24 @@ func NewHeader(in interface{}, context *compiler.Context) (*Header, error) { // string type = 1; v1 := compiler.MapValueForKey(m, "type") if v1 != nil { - x.Type, ok = v1.(string) + x.Type, ok = compiler.StringForScalarNode(v1) if !ok { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) errors = append(errors, compiler.NewError(context, message)) } // check for valid enum values // [string number integer boolean array] if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "integer", "boolean", "array"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) errors = append(errors, compiler.NewError(context, message)) } } // string format = 2; v2 := compiler.MapValueForKey(m, "format") if v2 != nil { - x.Format, ok = v2.(string) + x.Format, ok = compiler.StringForScalarNode(v2) if !ok { - message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v2, v2) + message := fmt.Sprintf("has unexpected value for format: %s", compiler.Display(v2)) errors = append(errors, compiler.NewError(context, message)) } } @@ -1382,15 +1347,15 @@ func NewHeader(in interface{}, context *compiler.Context) (*Header, error) { // string collection_format = 4; v4 := compiler.MapValueForKey(m, "collectionFormat") if v4 != nil { - x.CollectionFormat, ok = v4.(string) + x.CollectionFormat, ok = compiler.StringForScalarNode(v4) if !ok { - message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v4, v4) + message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v4)) errors = append(errors, compiler.NewError(context, message)) } // check for valid enum values // [csv ssv tsv pipes] if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) { - message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v4, v4) + message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v4)) errors = append(errors, compiler.NewError(context, message)) } } @@ -1406,126 +1371,102 @@ func NewHeader(in interface{}, context *compiler.Context) (*Header, error) { // float maximum = 6; v6 := compiler.MapValueForKey(m, "maximum") if v6 != nil { - switch v6 := v6.(type) { - case float64: - x.Maximum = v6 - case float32: - x.Maximum = float64(v6) - case uint64: - x.Maximum = float64(v6) - case uint32: - x.Maximum = float64(v6) - case int64: - x.Maximum = float64(v6) - case int32: - x.Maximum = float64(v6) - case int: - x.Maximum = float64(v6) - default: - message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v6, v6) + v, ok := compiler.FloatForScalarNode(v6) + if ok { + x.Maximum = v + } else { + message := fmt.Sprintf("has unexpected value for maximum: %s", compiler.Display(v6)) errors = append(errors, compiler.NewError(context, message)) } } // bool exclusive_maximum = 7; v7 := compiler.MapValueForKey(m, "exclusiveMaximum") if v7 != nil { - x.ExclusiveMaximum, ok = v7.(bool) + x.ExclusiveMaximum, ok = compiler.BoolForScalarNode(v7) if !ok { - message := fmt.Sprintf("has unexpected value for 
exclusiveMaximum: %+v (%T)", v7, v7) + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %s", compiler.Display(v7)) errors = append(errors, compiler.NewError(context, message)) } } // float minimum = 8; v8 := compiler.MapValueForKey(m, "minimum") if v8 != nil { - switch v8 := v8.(type) { - case float64: - x.Minimum = v8 - case float32: - x.Minimum = float64(v8) - case uint64: - x.Minimum = float64(v8) - case uint32: - x.Minimum = float64(v8) - case int64: - x.Minimum = float64(v8) - case int32: - x.Minimum = float64(v8) - case int: - x.Minimum = float64(v8) - default: - message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v8, v8) + v, ok := compiler.FloatForScalarNode(v8) + if ok { + x.Minimum = v + } else { + message := fmt.Sprintf("has unexpected value for minimum: %s", compiler.Display(v8)) errors = append(errors, compiler.NewError(context, message)) } } // bool exclusive_minimum = 9; v9 := compiler.MapValueForKey(m, "exclusiveMinimum") if v9 != nil { - x.ExclusiveMinimum, ok = v9.(bool) + x.ExclusiveMinimum, ok = compiler.BoolForScalarNode(v9) if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v9, v9) + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %s", compiler.Display(v9)) errors = append(errors, compiler.NewError(context, message)) } } // int64 max_length = 10; v10 := compiler.MapValueForKey(m, "maxLength") if v10 != nil { - t, ok := v10.(int) + t, ok := compiler.IntForScalarNode(v10) if ok { x.MaxLength = int64(t) } else { - message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v10, v10) + message := fmt.Sprintf("has unexpected value for maxLength: %s", compiler.Display(v10)) errors = append(errors, compiler.NewError(context, message)) } } // int64 min_length = 11; v11 := compiler.MapValueForKey(m, "minLength") if v11 != nil { - t, ok := v11.(int) + t, ok := compiler.IntForScalarNode(v11) if ok { x.MinLength = int64(t) } else { - message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v11, v11) + message := fmt.Sprintf("has unexpected value for minLength: %s", compiler.Display(v11)) errors = append(errors, compiler.NewError(context, message)) } } // string pattern = 12; v12 := compiler.MapValueForKey(m, "pattern") if v12 != nil { - x.Pattern, ok = v12.(string) + x.Pattern, ok = compiler.StringForScalarNode(v12) if !ok { - message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v12, v12) + message := fmt.Sprintf("has unexpected value for pattern: %s", compiler.Display(v12)) errors = append(errors, compiler.NewError(context, message)) } } // int64 max_items = 13; v13 := compiler.MapValueForKey(m, "maxItems") if v13 != nil { - t, ok := v13.(int) + t, ok := compiler.IntForScalarNode(v13) if ok { x.MaxItems = int64(t) } else { - message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v13, v13) + message := fmt.Sprintf("has unexpected value for maxItems: %s", compiler.Display(v13)) errors = append(errors, compiler.NewError(context, message)) } } // int64 min_items = 14; v14 := compiler.MapValueForKey(m, "minItems") if v14 != nil { - t, ok := v14.(int) + t, ok := compiler.IntForScalarNode(v14) if ok { x.MinItems = int64(t) } else { - message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v14, v14) + message := fmt.Sprintf("has unexpected value for minItems: %s", compiler.Display(v14)) errors = append(errors, compiler.NewError(context, message)) } } // bool unique_items = 15; v15 := compiler.MapValueForKey(m, "uniqueItems") if 
v15 != nil { - x.UniqueItems, ok = v15.(bool) + x.UniqueItems, ok = compiler.BoolForScalarNode(v15) if !ok { - message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v15, v15) + message := fmt.Sprintf("has unexpected value for uniqueItems: %s", compiler.Display(v15)) errors = append(errors, compiler.NewError(context, message)) } } @@ -1534,9 +1475,9 @@ func NewHeader(in interface{}, context *compiler.Context) (*Header, error) { if v16 != nil { // repeated Any x.Enum = make([]*Any, 0) - a, ok := v16.([]interface{}) + a, ok := compiler.SequenceNodeForNode(v16) if ok { - for _, item := range a { + for _, item := range a.Content { y, err := NewAny(item, compiler.NewContext("enum", context)) if err != nil { errors = append(errors, err) @@ -1548,52 +1489,40 @@ func NewHeader(in interface{}, context *compiler.Context) (*Header, error) { // float multiple_of = 17; v17 := compiler.MapValueForKey(m, "multipleOf") if v17 != nil { - switch v17 := v17.(type) { - case float64: - x.MultipleOf = v17 - case float32: - x.MultipleOf = float64(v17) - case uint64: - x.MultipleOf = float64(v17) - case uint32: - x.MultipleOf = float64(v17) - case int64: - x.MultipleOf = float64(v17) - case int32: - x.MultipleOf = float64(v17) - case int: - x.MultipleOf = float64(v17) - default: - message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v17, v17) + v, ok := compiler.FloatForScalarNode(v17) + if ok { + x.MultipleOf = v + } else { + message := fmt.Sprintf("has unexpected value for multipleOf: %s", compiler.Display(v17)) errors = append(errors, compiler.NewError(context, message)) } } // string description = 18; v18 := compiler.MapValueForKey(m, "description") if v18 != nil { - x.Description, ok = v18.(string) + x.Description, ok = compiler.StringForScalarNode(v18) if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v18, v18) + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v18)) errors = append(errors, compiler.NewError(context, message)) } } // repeated NamedAny vendor_extension = 19; // MAP: Any ^x- x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) if ok { - v := item.Value + v := m.Content[i+1] if strings.HasPrefix(k, "x-") { pair := &NamedAny{} pair.Name = k result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + handled, resultFromExt, err := compiler.CallExtension(context, v, k) if handled { if err != nil { errors = append(errors, err) } else { - bytes, _ := yaml.Marshal(v) + bytes := compiler.Marshal(v) result.Yaml = string(bytes) result.Value = resultFromExt pair.Value = result @@ -1613,7 +1542,7 @@ func NewHeader(in interface{}, context *compiler.Context) (*Header, error) { } // NewHeaderParameterSubSchema creates an object of type HeaderParameterSubSchema if possible, returning an error if not. 
-func NewHeaderParameterSubSchema(in interface{}, context *compiler.Context) (*HeaderParameterSubSchema, error) { +func NewHeaderParameterSubSchema(in *yaml.Node, context *compiler.Context) (*HeaderParameterSubSchema, error) { errors := make([]error, 0) x := &HeaderParameterSubSchema{} m, ok := compiler.UnpackMap(in) @@ -1631,66 +1560,66 @@ func NewHeaderParameterSubSchema(in interface{}, context *compiler.Context) (*He // bool required = 1; v1 := compiler.MapValueForKey(m, "required") if v1 != nil { - x.Required, ok = v1.(bool) + x.Required, ok = compiler.BoolForScalarNode(v1) if !ok { - message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v1, v1) + message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v1)) errors = append(errors, compiler.NewError(context, message)) } } // string in = 2; v2 := compiler.MapValueForKey(m, "in") if v2 != nil { - x.In, ok = v2.(string) + x.In, ok = compiler.StringForScalarNode(v2) if !ok { - message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) + message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v2)) errors = append(errors, compiler.NewError(context, message)) } // check for valid enum values // [header] if ok && !compiler.StringArrayContainsValue([]string{"header"}, x.In) { - message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) + message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v2)) errors = append(errors, compiler.NewError(context, message)) } } // string description = 3; v3 := compiler.MapValueForKey(m, "description") if v3 != nil { - x.Description, ok = v3.(string) + x.Description, ok = compiler.StringForScalarNode(v3) if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v3)) errors = append(errors, compiler.NewError(context, message)) } } // string name = 4; v4 := compiler.MapValueForKey(m, "name") if v4 != nil { - x.Name, ok = v4.(string) + x.Name, ok = compiler.StringForScalarNode(v4) if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v4, v4) + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v4)) errors = append(errors, compiler.NewError(context, message)) } } // string type = 5; v5 := compiler.MapValueForKey(m, "type") if v5 != nil { - x.Type, ok = v5.(string) + x.Type, ok = compiler.StringForScalarNode(v5) if !ok { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v5, v5) + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v5)) errors = append(errors, compiler.NewError(context, message)) } // check for valid enum values // [string number boolean integer array] if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v5, v5) + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v5)) errors = append(errors, compiler.NewError(context, message)) } } // string format = 6; v6 := compiler.MapValueForKey(m, "format") if v6 != nil { - x.Format, ok = v6.(string) + x.Format, ok = compiler.StringForScalarNode(v6) if !ok { - message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v6, v6) + message := fmt.Sprintf("has unexpected value for format: %s", compiler.Display(v6)) errors = append(errors, compiler.NewError(context, message)) } } @@ -1706,15 
+1635,15 @@ func NewHeaderParameterSubSchema(in interface{}, context *compiler.Context) (*He // string collection_format = 8; v8 := compiler.MapValueForKey(m, "collectionFormat") if v8 != nil { - x.CollectionFormat, ok = v8.(string) + x.CollectionFormat, ok = compiler.StringForScalarNode(v8) if !ok { - message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v8, v8) + message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v8)) errors = append(errors, compiler.NewError(context, message)) } // check for valid enum values // [csv ssv tsv pipes] if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) { - message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v8, v8) + message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v8)) errors = append(errors, compiler.NewError(context, message)) } } @@ -1730,126 +1659,102 @@ func NewHeaderParameterSubSchema(in interface{}, context *compiler.Context) (*He // float maximum = 10; v10 := compiler.MapValueForKey(m, "maximum") if v10 != nil { - switch v10 := v10.(type) { - case float64: - x.Maximum = v10 - case float32: - x.Maximum = float64(v10) - case uint64: - x.Maximum = float64(v10) - case uint32: - x.Maximum = float64(v10) - case int64: - x.Maximum = float64(v10) - case int32: - x.Maximum = float64(v10) - case int: - x.Maximum = float64(v10) - default: - message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v10, v10) + v, ok := compiler.FloatForScalarNode(v10) + if ok { + x.Maximum = v + } else { + message := fmt.Sprintf("has unexpected value for maximum: %s", compiler.Display(v10)) errors = append(errors, compiler.NewError(context, message)) } } // bool exclusive_maximum = 11; v11 := compiler.MapValueForKey(m, "exclusiveMaximum") if v11 != nil { - x.ExclusiveMaximum, ok = v11.(bool) + x.ExclusiveMaximum, ok = compiler.BoolForScalarNode(v11) if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v11, v11) + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %s", compiler.Display(v11)) errors = append(errors, compiler.NewError(context, message)) } } // float minimum = 12; v12 := compiler.MapValueForKey(m, "minimum") if v12 != nil { - switch v12 := v12.(type) { - case float64: - x.Minimum = v12 - case float32: - x.Minimum = float64(v12) - case uint64: - x.Minimum = float64(v12) - case uint32: - x.Minimum = float64(v12) - case int64: - x.Minimum = float64(v12) - case int32: - x.Minimum = float64(v12) - case int: - x.Minimum = float64(v12) - default: - message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v12, v12) + v, ok := compiler.FloatForScalarNode(v12) + if ok { + x.Minimum = v + } else { + message := fmt.Sprintf("has unexpected value for minimum: %s", compiler.Display(v12)) errors = append(errors, compiler.NewError(context, message)) } } // bool exclusive_minimum = 13; v13 := compiler.MapValueForKey(m, "exclusiveMinimum") if v13 != nil { - x.ExclusiveMinimum, ok = v13.(bool) + x.ExclusiveMinimum, ok = compiler.BoolForScalarNode(v13) if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v13, v13) + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %s", compiler.Display(v13)) errors = append(errors, compiler.NewError(context, message)) } } // int64 max_length = 14; v14 := compiler.MapValueForKey(m, "maxLength") if v14 != nil { - t, ok := v14.(int) + t, ok := 
compiler.IntForScalarNode(v14) if ok { x.MaxLength = int64(t) } else { - message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v14, v14) + message := fmt.Sprintf("has unexpected value for maxLength: %s", compiler.Display(v14)) errors = append(errors, compiler.NewError(context, message)) } } // int64 min_length = 15; v15 := compiler.MapValueForKey(m, "minLength") if v15 != nil { - t, ok := v15.(int) + t, ok := compiler.IntForScalarNode(v15) if ok { x.MinLength = int64(t) } else { - message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v15, v15) + message := fmt.Sprintf("has unexpected value for minLength: %s", compiler.Display(v15)) errors = append(errors, compiler.NewError(context, message)) } } // string pattern = 16; v16 := compiler.MapValueForKey(m, "pattern") if v16 != nil { - x.Pattern, ok = v16.(string) + x.Pattern, ok = compiler.StringForScalarNode(v16) if !ok { - message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v16, v16) + message := fmt.Sprintf("has unexpected value for pattern: %s", compiler.Display(v16)) errors = append(errors, compiler.NewError(context, message)) } } // int64 max_items = 17; v17 := compiler.MapValueForKey(m, "maxItems") if v17 != nil { - t, ok := v17.(int) + t, ok := compiler.IntForScalarNode(v17) if ok { x.MaxItems = int64(t) } else { - message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v17, v17) + message := fmt.Sprintf("has unexpected value for maxItems: %s", compiler.Display(v17)) errors = append(errors, compiler.NewError(context, message)) } } // int64 min_items = 18; v18 := compiler.MapValueForKey(m, "minItems") if v18 != nil { - t, ok := v18.(int) + t, ok := compiler.IntForScalarNode(v18) if ok { x.MinItems = int64(t) } else { - message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v18, v18) + message := fmt.Sprintf("has unexpected value for minItems: %s", compiler.Display(v18)) errors = append(errors, compiler.NewError(context, message)) } } // bool unique_items = 19; v19 := compiler.MapValueForKey(m, "uniqueItems") if v19 != nil { - x.UniqueItems, ok = v19.(bool) + x.UniqueItems, ok = compiler.BoolForScalarNode(v19) if !ok { - message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v19, v19) + message := fmt.Sprintf("has unexpected value for uniqueItems: %s", compiler.Display(v19)) errors = append(errors, compiler.NewError(context, message)) } } @@ -1858,9 +1763,9 @@ func NewHeaderParameterSubSchema(in interface{}, context *compiler.Context) (*He if v20 != nil { // repeated Any x.Enum = make([]*Any, 0) - a, ok := v20.([]interface{}) + a, ok := compiler.SequenceNodeForNode(v20) if ok { - for _, item := range a { + for _, item := range a.Content { y, err := NewAny(item, compiler.NewContext("enum", context)) if err != nil { errors = append(errors, err) @@ -1872,43 +1777,31 @@ func NewHeaderParameterSubSchema(in interface{}, context *compiler.Context) (*He // float multiple_of = 21; v21 := compiler.MapValueForKey(m, "multipleOf") if v21 != nil { - switch v21 := v21.(type) { - case float64: - x.MultipleOf = v21 - case float32: - x.MultipleOf = float64(v21) - case uint64: - x.MultipleOf = float64(v21) - case uint32: - x.MultipleOf = float64(v21) - case int64: - x.MultipleOf = float64(v21) - case int32: - x.MultipleOf = float64(v21) - case int: - x.MultipleOf = float64(v21) - default: - message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v21, v21) + v, ok := compiler.FloatForScalarNode(v21) + if ok { + x.MultipleOf = v + } else { + 
message := fmt.Sprintf("has unexpected value for multipleOf: %s", compiler.Display(v21)) errors = append(errors, compiler.NewError(context, message)) } } // repeated NamedAny vendor_extension = 22; // MAP: Any ^x- x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) if ok { - v := item.Value + v := m.Content[i+1] if strings.HasPrefix(k, "x-") { pair := &NamedAny{} pair.Name = k result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + handled, resultFromExt, err := compiler.CallExtension(context, v, k) if handled { if err != nil { errors = append(errors, err) } else { - bytes, _ := yaml.Marshal(v) + bytes := compiler.Marshal(v) result.Yaml = string(bytes) result.Value = resultFromExt pair.Value = result @@ -1928,7 +1821,7 @@ func NewHeaderParameterSubSchema(in interface{}, context *compiler.Context) (*He } // NewHeaders creates an object of type Headers if possible, returning an error if not. -func NewHeaders(in interface{}, context *compiler.Context) (*Headers, error) { +func NewHeaders(in *yaml.Node, context *compiler.Context) (*Headers, error) { errors := make([]error, 0) x := &Headers{} m, ok := compiler.UnpackMap(in) @@ -1939,10 +1832,10 @@ func NewHeaders(in interface{}, context *compiler.Context) (*Headers, error) { // repeated NamedHeader additional_properties = 1; // MAP: Header x.AdditionalProperties = make([]*NamedHeader, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) if ok { - v := item.Value + v := m.Content[i+1] pair := &NamedHeader{} pair.Name = k var err error @@ -1958,7 +1851,7 @@ func NewHeaders(in interface{}, context *compiler.Context) (*Headers, error) { } // NewInfo creates an object of type Info if possible, returning an error if not. 
-func NewInfo(in interface{}, context *compiler.Context) (*Info, error) { +func NewInfo(in *yaml.Node, context *compiler.Context) (*Info, error) { errors := make([]error, 0) x := &Info{} m, ok := compiler.UnpackMap(in) @@ -1982,36 +1875,36 @@ func NewInfo(in interface{}, context *compiler.Context) (*Info, error) { // string title = 1; v1 := compiler.MapValueForKey(m, "title") if v1 != nil { - x.Title, ok = v1.(string) + x.Title, ok = compiler.StringForScalarNode(v1) if !ok { - message := fmt.Sprintf("has unexpected value for title: %+v (%T)", v1, v1) + message := fmt.Sprintf("has unexpected value for title: %s", compiler.Display(v1)) errors = append(errors, compiler.NewError(context, message)) } } // string version = 2; v2 := compiler.MapValueForKey(m, "version") if v2 != nil { - x.Version, ok = v2.(string) + x.Version, ok = compiler.StringForScalarNode(v2) if !ok { - message := fmt.Sprintf("has unexpected value for version: %+v (%T)", v2, v2) + message := fmt.Sprintf("has unexpected value for version: %s", compiler.Display(v2)) errors = append(errors, compiler.NewError(context, message)) } } // string description = 3; v3 := compiler.MapValueForKey(m, "description") if v3 != nil { - x.Description, ok = v3.(string) + x.Description, ok = compiler.StringForScalarNode(v3) if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v3)) errors = append(errors, compiler.NewError(context, message)) } } // string terms_of_service = 4; v4 := compiler.MapValueForKey(m, "termsOfService") if v4 != nil { - x.TermsOfService, ok = v4.(string) + x.TermsOfService, ok = compiler.StringForScalarNode(v4) if !ok { - message := fmt.Sprintf("has unexpected value for termsOfService: %+v (%T)", v4, v4) + message := fmt.Sprintf("has unexpected value for termsOfService: %s", compiler.Display(v4)) errors = append(errors, compiler.NewError(context, message)) } } @@ -2036,20 +1929,20 @@ func NewInfo(in interface{}, context *compiler.Context) (*Info, error) { // repeated NamedAny vendor_extension = 7; // MAP: Any ^x- x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) if ok { - v := item.Value + v := m.Content[i+1] if strings.HasPrefix(k, "x-") { pair := &NamedAny{} pair.Name = k result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + handled, resultFromExt, err := compiler.CallExtension(context, v, k) if handled { if err != nil { errors = append(errors, err) } else { - bytes, _ := yaml.Marshal(v) + bytes := compiler.Marshal(v) result.Yaml = string(bytes) result.Value = resultFromExt pair.Value = result @@ -2069,7 +1962,7 @@ func NewInfo(in interface{}, context *compiler.Context) (*Info, error) { } // NewItemsItem creates an object of type ItemsItem if possible, returning an error if not. -func NewItemsItem(in interface{}, context *compiler.Context) (*ItemsItem, error) { +func NewItemsItem(in *yaml.Node, context *compiler.Context) (*ItemsItem, error) { errors := make([]error, 0) x := &ItemsItem{} m, ok := compiler.UnpackMap(in) @@ -2088,7 +1981,7 @@ func NewItemsItem(in interface{}, context *compiler.Context) (*ItemsItem, error) } // NewJsonReference creates an object of type JsonReference if possible, returning an error if not. 
-func NewJsonReference(in interface{}, context *compiler.Context) (*JsonReference, error) { +func NewJsonReference(in *yaml.Node, context *compiler.Context) (*JsonReference, error) { errors := make([]error, 0) x := &JsonReference{} m, ok := compiler.UnpackMap(in) @@ -2102,28 +1995,21 @@ func NewJsonReference(in interface{}, context *compiler.Context) (*JsonReference message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) errors = append(errors, compiler.NewError(context, message)) } - allowedKeys := []string{"$ref", "description"} - var allowedPatterns []*regexp.Regexp - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } // string _ref = 1; v1 := compiler.MapValueForKey(m, "$ref") if v1 != nil { - x.XRef, ok = v1.(string) + x.XRef, ok = compiler.StringForScalarNode(v1) if !ok { - message := fmt.Sprintf("has unexpected value for $ref: %+v (%T)", v1, v1) + message := fmt.Sprintf("has unexpected value for $ref: %s", compiler.Display(v1)) errors = append(errors, compiler.NewError(context, message)) } } // string description = 2; v2 := compiler.MapValueForKey(m, "description") if v2 != nil { - x.Description, ok = v2.(string) + x.Description, ok = compiler.StringForScalarNode(v2) if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v2, v2) + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v2)) errors = append(errors, compiler.NewError(context, message)) } } @@ -2132,7 +2018,7 @@ func NewJsonReference(in interface{}, context *compiler.Context) (*JsonReference } // NewLicense creates an object of type License if possible, returning an error if not. 
-func NewLicense(in interface{}, context *compiler.Context) (*License, error) { +func NewLicense(in *yaml.Node, context *compiler.Context) (*License, error) { errors := make([]error, 0) x := &License{} m, ok := compiler.UnpackMap(in) @@ -2156,38 +2042,38 @@ func NewLicense(in interface{}, context *compiler.Context) (*License, error) { // string name = 1; v1 := compiler.MapValueForKey(m, "name") if v1 != nil { - x.Name, ok = v1.(string) + x.Name, ok = compiler.StringForScalarNode(v1) if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) errors = append(errors, compiler.NewError(context, message)) } } // string url = 2; v2 := compiler.MapValueForKey(m, "url") if v2 != nil { - x.Url, ok = v2.(string) + x.Url, ok = compiler.StringForScalarNode(v2) if !ok { - message := fmt.Sprintf("has unexpected value for url: %+v (%T)", v2, v2) + message := fmt.Sprintf("has unexpected value for url: %s", compiler.Display(v2)) errors = append(errors, compiler.NewError(context, message)) } } // repeated NamedAny vendor_extension = 3; // MAP: Any ^x- x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) if ok { - v := item.Value + v := m.Content[i+1] if strings.HasPrefix(k, "x-") { pair := &NamedAny{} pair.Name = k result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + handled, resultFromExt, err := compiler.CallExtension(context, v, k) if handled { if err != nil { errors = append(errors, err) } else { - bytes, _ := yaml.Marshal(v) + bytes := compiler.Marshal(v) result.Yaml = string(bytes) result.Value = resultFromExt pair.Value = result @@ -2207,7 +2093,7 @@ func NewLicense(in interface{}, context *compiler.Context) (*License, error) { } // NewNamedAny creates an object of type NamedAny if possible, returning an error if not. -func NewNamedAny(in interface{}, context *compiler.Context) (*NamedAny, error) { +func NewNamedAny(in *yaml.Node, context *compiler.Context) (*NamedAny, error) { errors := make([]error, 0) x := &NamedAny{} m, ok := compiler.UnpackMap(in) @@ -2225,9 +2111,9 @@ func NewNamedAny(in interface{}, context *compiler.Context) (*NamedAny, error) { // string name = 1; v1 := compiler.MapValueForKey(m, "name") if v1 != nil { - x.Name, ok = v1.(string) + x.Name, ok = compiler.StringForScalarNode(v1) if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) errors = append(errors, compiler.NewError(context, message)) } } @@ -2245,7 +2131,7 @@ func NewNamedAny(in interface{}, context *compiler.Context) (*NamedAny, error) { } // NewNamedHeader creates an object of type NamedHeader if possible, returning an error if not. 
-func NewNamedHeader(in interface{}, context *compiler.Context) (*NamedHeader, error) { +func NewNamedHeader(in *yaml.Node, context *compiler.Context) (*NamedHeader, error) { errors := make([]error, 0) x := &NamedHeader{} m, ok := compiler.UnpackMap(in) @@ -2263,9 +2149,9 @@ func NewNamedHeader(in interface{}, context *compiler.Context) (*NamedHeader, er // string name = 1; v1 := compiler.MapValueForKey(m, "name") if v1 != nil { - x.Name, ok = v1.(string) + x.Name, ok = compiler.StringForScalarNode(v1) if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) errors = append(errors, compiler.NewError(context, message)) } } @@ -2283,7 +2169,7 @@ func NewNamedHeader(in interface{}, context *compiler.Context) (*NamedHeader, er } // NewNamedParameter creates an object of type NamedParameter if possible, returning an error if not. -func NewNamedParameter(in interface{}, context *compiler.Context) (*NamedParameter, error) { +func NewNamedParameter(in *yaml.Node, context *compiler.Context) (*NamedParameter, error) { errors := make([]error, 0) x := &NamedParameter{} m, ok := compiler.UnpackMap(in) @@ -2301,9 +2187,9 @@ func NewNamedParameter(in interface{}, context *compiler.Context) (*NamedParamet // string name = 1; v1 := compiler.MapValueForKey(m, "name") if v1 != nil { - x.Name, ok = v1.(string) + x.Name, ok = compiler.StringForScalarNode(v1) if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) errors = append(errors, compiler.NewError(context, message)) } } @@ -2321,7 +2207,7 @@ func NewNamedParameter(in interface{}, context *compiler.Context) (*NamedParamet } // NewNamedPathItem creates an object of type NamedPathItem if possible, returning an error if not. -func NewNamedPathItem(in interface{}, context *compiler.Context) (*NamedPathItem, error) { +func NewNamedPathItem(in *yaml.Node, context *compiler.Context) (*NamedPathItem, error) { errors := make([]error, 0) x := &NamedPathItem{} m, ok := compiler.UnpackMap(in) @@ -2339,9 +2225,9 @@ func NewNamedPathItem(in interface{}, context *compiler.Context) (*NamedPathItem // string name = 1; v1 := compiler.MapValueForKey(m, "name") if v1 != nil { - x.Name, ok = v1.(string) + x.Name, ok = compiler.StringForScalarNode(v1) if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) errors = append(errors, compiler.NewError(context, message)) } } @@ -2359,7 +2245,7 @@ func NewNamedPathItem(in interface{}, context *compiler.Context) (*NamedPathItem } // NewNamedResponse creates an object of type NamedResponse if possible, returning an error if not. 
-func NewNamedResponse(in interface{}, context *compiler.Context) (*NamedResponse, error) { +func NewNamedResponse(in *yaml.Node, context *compiler.Context) (*NamedResponse, error) { errors := make([]error, 0) x := &NamedResponse{} m, ok := compiler.UnpackMap(in) @@ -2377,9 +2263,9 @@ func NewNamedResponse(in interface{}, context *compiler.Context) (*NamedResponse // string name = 1; v1 := compiler.MapValueForKey(m, "name") if v1 != nil { - x.Name, ok = v1.(string) + x.Name, ok = compiler.StringForScalarNode(v1) if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) errors = append(errors, compiler.NewError(context, message)) } } @@ -2397,7 +2283,7 @@ func NewNamedResponse(in interface{}, context *compiler.Context) (*NamedResponse } // NewNamedResponseValue creates an object of type NamedResponseValue if possible, returning an error if not. -func NewNamedResponseValue(in interface{}, context *compiler.Context) (*NamedResponseValue, error) { +func NewNamedResponseValue(in *yaml.Node, context *compiler.Context) (*NamedResponseValue, error) { errors := make([]error, 0) x := &NamedResponseValue{} m, ok := compiler.UnpackMap(in) @@ -2415,9 +2301,9 @@ func NewNamedResponseValue(in interface{}, context *compiler.Context) (*NamedRes // string name = 1; v1 := compiler.MapValueForKey(m, "name") if v1 != nil { - x.Name, ok = v1.(string) + x.Name, ok = compiler.StringForScalarNode(v1) if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) errors = append(errors, compiler.NewError(context, message)) } } @@ -2435,7 +2321,7 @@ func NewNamedResponseValue(in interface{}, context *compiler.Context) (*NamedRes } // NewNamedSchema creates an object of type NamedSchema if possible, returning an error if not. -func NewNamedSchema(in interface{}, context *compiler.Context) (*NamedSchema, error) { +func NewNamedSchema(in *yaml.Node, context *compiler.Context) (*NamedSchema, error) { errors := make([]error, 0) x := &NamedSchema{} m, ok := compiler.UnpackMap(in) @@ -2453,9 +2339,9 @@ func NewNamedSchema(in interface{}, context *compiler.Context) (*NamedSchema, er // string name = 1; v1 := compiler.MapValueForKey(m, "name") if v1 != nil { - x.Name, ok = v1.(string) + x.Name, ok = compiler.StringForScalarNode(v1) if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) errors = append(errors, compiler.NewError(context, message)) } } @@ -2473,7 +2359,7 @@ func NewNamedSchema(in interface{}, context *compiler.Context) (*NamedSchema, er } // NewNamedSecurityDefinitionsItem creates an object of type NamedSecurityDefinitionsItem if possible, returning an error if not. 
-func NewNamedSecurityDefinitionsItem(in interface{}, context *compiler.Context) (*NamedSecurityDefinitionsItem, error) { +func NewNamedSecurityDefinitionsItem(in *yaml.Node, context *compiler.Context) (*NamedSecurityDefinitionsItem, error) { errors := make([]error, 0) x := &NamedSecurityDefinitionsItem{} m, ok := compiler.UnpackMap(in) @@ -2491,9 +2377,9 @@ func NewNamedSecurityDefinitionsItem(in interface{}, context *compiler.Context) // string name = 1; v1 := compiler.MapValueForKey(m, "name") if v1 != nil { - x.Name, ok = v1.(string) + x.Name, ok = compiler.StringForScalarNode(v1) if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) errors = append(errors, compiler.NewError(context, message)) } } @@ -2511,7 +2397,7 @@ func NewNamedSecurityDefinitionsItem(in interface{}, context *compiler.Context) } // NewNamedString creates an object of type NamedString if possible, returning an error if not. -func NewNamedString(in interface{}, context *compiler.Context) (*NamedString, error) { +func NewNamedString(in *yaml.Node, context *compiler.Context) (*NamedString, error) { errors := make([]error, 0) x := &NamedString{} m, ok := compiler.UnpackMap(in) @@ -2529,18 +2415,18 @@ func NewNamedString(in interface{}, context *compiler.Context) (*NamedString, er // string name = 1; v1 := compiler.MapValueForKey(m, "name") if v1 != nil { - x.Name, ok = v1.(string) + x.Name, ok = compiler.StringForScalarNode(v1) if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) errors = append(errors, compiler.NewError(context, message)) } } // string value = 2; v2 := compiler.MapValueForKey(m, "value") if v2 != nil { - x.Value, ok = v2.(string) + x.Value, ok = compiler.StringForScalarNode(v2) if !ok { - message := fmt.Sprintf("has unexpected value for value: %+v (%T)", v2, v2) + message := fmt.Sprintf("has unexpected value for value: %s", compiler.Display(v2)) errors = append(errors, compiler.NewError(context, message)) } } @@ -2549,7 +2435,7 @@ func NewNamedString(in interface{}, context *compiler.Context) (*NamedString, er } // NewNamedStringArray creates an object of type NamedStringArray if possible, returning an error if not. -func NewNamedStringArray(in interface{}, context *compiler.Context) (*NamedStringArray, error) { +func NewNamedStringArray(in *yaml.Node, context *compiler.Context) (*NamedStringArray, error) { errors := make([]error, 0) x := &NamedStringArray{} m, ok := compiler.UnpackMap(in) @@ -2567,9 +2453,9 @@ func NewNamedStringArray(in interface{}, context *compiler.Context) (*NamedStrin // string name = 1; v1 := compiler.MapValueForKey(m, "name") if v1 != nil { - x.Name, ok = v1.(string) + x.Name, ok = compiler.StringForScalarNode(v1) if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) errors = append(errors, compiler.NewError(context, message)) } } @@ -2587,7 +2473,7 @@ func NewNamedStringArray(in interface{}, context *compiler.Context) (*NamedStrin } // NewNonBodyParameter creates an object of type NonBodyParameter if possible, returning an error if not. 
-func NewNonBodyParameter(in interface{}, context *compiler.Context) (*NonBodyParameter, error) { +func NewNonBodyParameter(in *yaml.Node, context *compiler.Context) (*NonBodyParameter, error) { errors := make([]error, 0) x := &NonBodyParameter{} matched := false @@ -2655,7 +2541,7 @@ func NewNonBodyParameter(in interface{}, context *compiler.Context) (*NonBodyPar } // NewOauth2AccessCodeSecurity creates an object of type Oauth2AccessCodeSecurity if possible, returning an error if not. -func NewOauth2AccessCodeSecurity(in interface{}, context *compiler.Context) (*Oauth2AccessCodeSecurity, error) { +func NewOauth2AccessCodeSecurity(in *yaml.Node, context *compiler.Context) (*Oauth2AccessCodeSecurity, error) { errors := make([]error, 0) x := &Oauth2AccessCodeSecurity{} m, ok := compiler.UnpackMap(in) @@ -2679,30 +2565,30 @@ func NewOauth2AccessCodeSecurity(in interface{}, context *compiler.Context) (*Oa // string type = 1; v1 := compiler.MapValueForKey(m, "type") if v1 != nil { - x.Type, ok = v1.(string) + x.Type, ok = compiler.StringForScalarNode(v1) if !ok { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) errors = append(errors, compiler.NewError(context, message)) } // check for valid enum values // [oauth2] if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) errors = append(errors, compiler.NewError(context, message)) } } // string flow = 2; v2 := compiler.MapValueForKey(m, "flow") if v2 != nil { - x.Flow, ok = v2.(string) + x.Flow, ok = compiler.StringForScalarNode(v2) if !ok { - message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) + message := fmt.Sprintf("has unexpected value for flow: %s", compiler.Display(v2)) errors = append(errors, compiler.NewError(context, message)) } // check for valid enum values // [accessCode] if ok && !compiler.StringArrayContainsValue([]string{"accessCode"}, x.Flow) { - message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) + message := fmt.Sprintf("has unexpected value for flow: %s", compiler.Display(v2)) errors = append(errors, compiler.NewError(context, message)) } } @@ -2718,47 +2604,47 @@ func NewOauth2AccessCodeSecurity(in interface{}, context *compiler.Context) (*Oa // string authorization_url = 4; v4 := compiler.MapValueForKey(m, "authorizationUrl") if v4 != nil { - x.AuthorizationUrl, ok = v4.(string) + x.AuthorizationUrl, ok = compiler.StringForScalarNode(v4) if !ok { - message := fmt.Sprintf("has unexpected value for authorizationUrl: %+v (%T)", v4, v4) + message := fmt.Sprintf("has unexpected value for authorizationUrl: %s", compiler.Display(v4)) errors = append(errors, compiler.NewError(context, message)) } } // string token_url = 5; v5 := compiler.MapValueForKey(m, "tokenUrl") if v5 != nil { - x.TokenUrl, ok = v5.(string) + x.TokenUrl, ok = compiler.StringForScalarNode(v5) if !ok { - message := fmt.Sprintf("has unexpected value for tokenUrl: %+v (%T)", v5, v5) + message := fmt.Sprintf("has unexpected value for tokenUrl: %s", compiler.Display(v5)) errors = append(errors, compiler.NewError(context, message)) } } // string description = 6; v6 := compiler.MapValueForKey(m, "description") if v6 != nil { - x.Description, ok = v6.(string) + x.Description, ok = compiler.StringForScalarNode(v6) if !ok { - message := 
fmt.Sprintf("has unexpected value for description: %+v (%T)", v6, v6) + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v6)) errors = append(errors, compiler.NewError(context, message)) } } // repeated NamedAny vendor_extension = 7; // MAP: Any ^x- x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) if ok { - v := item.Value + v := m.Content[i+1] if strings.HasPrefix(k, "x-") { pair := &NamedAny{} pair.Name = k result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + handled, resultFromExt, err := compiler.CallExtension(context, v, k) if handled { if err != nil { errors = append(errors, err) } else { - bytes, _ := yaml.Marshal(v) + bytes := compiler.Marshal(v) result.Yaml = string(bytes) result.Value = resultFromExt pair.Value = result @@ -2778,7 +2664,7 @@ func NewOauth2AccessCodeSecurity(in interface{}, context *compiler.Context) (*Oa } // NewOauth2ApplicationSecurity creates an object of type Oauth2ApplicationSecurity if possible, returning an error if not. -func NewOauth2ApplicationSecurity(in interface{}, context *compiler.Context) (*Oauth2ApplicationSecurity, error) { +func NewOauth2ApplicationSecurity(in *yaml.Node, context *compiler.Context) (*Oauth2ApplicationSecurity, error) { errors := make([]error, 0) x := &Oauth2ApplicationSecurity{} m, ok := compiler.UnpackMap(in) @@ -2802,30 +2688,30 @@ func NewOauth2ApplicationSecurity(in interface{}, context *compiler.Context) (*O // string type = 1; v1 := compiler.MapValueForKey(m, "type") if v1 != nil { - x.Type, ok = v1.(string) + x.Type, ok = compiler.StringForScalarNode(v1) if !ok { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) errors = append(errors, compiler.NewError(context, message)) } // check for valid enum values // [oauth2] if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) errors = append(errors, compiler.NewError(context, message)) } } // string flow = 2; v2 := compiler.MapValueForKey(m, "flow") if v2 != nil { - x.Flow, ok = v2.(string) + x.Flow, ok = compiler.StringForScalarNode(v2) if !ok { - message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) + message := fmt.Sprintf("has unexpected value for flow: %s", compiler.Display(v2)) errors = append(errors, compiler.NewError(context, message)) } // check for valid enum values // [application] if ok && !compiler.StringArrayContainsValue([]string{"application"}, x.Flow) { - message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) + message := fmt.Sprintf("has unexpected value for flow: %s", compiler.Display(v2)) errors = append(errors, compiler.NewError(context, message)) } } @@ -2841,38 +2727,38 @@ func NewOauth2ApplicationSecurity(in interface{}, context *compiler.Context) (*O // string token_url = 4; v4 := compiler.MapValueForKey(m, "tokenUrl") if v4 != nil { - x.TokenUrl, ok = v4.(string) + x.TokenUrl, ok = compiler.StringForScalarNode(v4) if !ok { - message := fmt.Sprintf("has unexpected value for tokenUrl: %+v (%T)", v4, v4) + message := fmt.Sprintf("has unexpected value for tokenUrl: %s", compiler.Display(v4)) errors = 
append(errors, compiler.NewError(context, message)) } } // string description = 5; v5 := compiler.MapValueForKey(m, "description") if v5 != nil { - x.Description, ok = v5.(string) + x.Description, ok = compiler.StringForScalarNode(v5) if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v5, v5) + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v5)) errors = append(errors, compiler.NewError(context, message)) } } // repeated NamedAny vendor_extension = 6; // MAP: Any ^x- x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) if ok { - v := item.Value + v := m.Content[i+1] if strings.HasPrefix(k, "x-") { pair := &NamedAny{} pair.Name = k result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + handled, resultFromExt, err := compiler.CallExtension(context, v, k) if handled { if err != nil { errors = append(errors, err) } else { - bytes, _ := yaml.Marshal(v) + bytes := compiler.Marshal(v) result.Yaml = string(bytes) result.Value = resultFromExt pair.Value = result @@ -2892,7 +2778,7 @@ func NewOauth2ApplicationSecurity(in interface{}, context *compiler.Context) (*O } // NewOauth2ImplicitSecurity creates an object of type Oauth2ImplicitSecurity if possible, returning an error if not. -func NewOauth2ImplicitSecurity(in interface{}, context *compiler.Context) (*Oauth2ImplicitSecurity, error) { +func NewOauth2ImplicitSecurity(in *yaml.Node, context *compiler.Context) (*Oauth2ImplicitSecurity, error) { errors := make([]error, 0) x := &Oauth2ImplicitSecurity{} m, ok := compiler.UnpackMap(in) @@ -2916,30 +2802,30 @@ func NewOauth2ImplicitSecurity(in interface{}, context *compiler.Context) (*Oaut // string type = 1; v1 := compiler.MapValueForKey(m, "type") if v1 != nil { - x.Type, ok = v1.(string) + x.Type, ok = compiler.StringForScalarNode(v1) if !ok { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) errors = append(errors, compiler.NewError(context, message)) } // check for valid enum values // [oauth2] if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) errors = append(errors, compiler.NewError(context, message)) } } // string flow = 2; v2 := compiler.MapValueForKey(m, "flow") if v2 != nil { - x.Flow, ok = v2.(string) + x.Flow, ok = compiler.StringForScalarNode(v2) if !ok { - message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) + message := fmt.Sprintf("has unexpected value for flow: %s", compiler.Display(v2)) errors = append(errors, compiler.NewError(context, message)) } // check for valid enum values // [implicit] if ok && !compiler.StringArrayContainsValue([]string{"implicit"}, x.Flow) { - message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) + message := fmt.Sprintf("has unexpected value for flow: %s", compiler.Display(v2)) errors = append(errors, compiler.NewError(context, message)) } } @@ -2955,38 +2841,38 @@ func NewOauth2ImplicitSecurity(in interface{}, context *compiler.Context) (*Oaut // string authorization_url = 4; v4 := compiler.MapValueForKey(m, "authorizationUrl") if v4 != nil { - x.AuthorizationUrl, ok = 
v4.(string) + x.AuthorizationUrl, ok = compiler.StringForScalarNode(v4) if !ok { - message := fmt.Sprintf("has unexpected value for authorizationUrl: %+v (%T)", v4, v4) + message := fmt.Sprintf("has unexpected value for authorizationUrl: %s", compiler.Display(v4)) errors = append(errors, compiler.NewError(context, message)) } } // string description = 5; v5 := compiler.MapValueForKey(m, "description") if v5 != nil { - x.Description, ok = v5.(string) + x.Description, ok = compiler.StringForScalarNode(v5) if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v5, v5) + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v5)) errors = append(errors, compiler.NewError(context, message)) } } // repeated NamedAny vendor_extension = 6; // MAP: Any ^x- x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) if ok { - v := item.Value + v := m.Content[i+1] if strings.HasPrefix(k, "x-") { pair := &NamedAny{} pair.Name = k result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + handled, resultFromExt, err := compiler.CallExtension(context, v, k) if handled { if err != nil { errors = append(errors, err) } else { - bytes, _ := yaml.Marshal(v) + bytes := compiler.Marshal(v) result.Yaml = string(bytes) result.Value = resultFromExt pair.Value = result @@ -3006,7 +2892,7 @@ func NewOauth2ImplicitSecurity(in interface{}, context *compiler.Context) (*Oaut } // NewOauth2PasswordSecurity creates an object of type Oauth2PasswordSecurity if possible, returning an error if not. -func NewOauth2PasswordSecurity(in interface{}, context *compiler.Context) (*Oauth2PasswordSecurity, error) { +func NewOauth2PasswordSecurity(in *yaml.Node, context *compiler.Context) (*Oauth2PasswordSecurity, error) { errors := make([]error, 0) x := &Oauth2PasswordSecurity{} m, ok := compiler.UnpackMap(in) @@ -3030,30 +2916,30 @@ func NewOauth2PasswordSecurity(in interface{}, context *compiler.Context) (*Oaut // string type = 1; v1 := compiler.MapValueForKey(m, "type") if v1 != nil { - x.Type, ok = v1.(string) + x.Type, ok = compiler.StringForScalarNode(v1) if !ok { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) errors = append(errors, compiler.NewError(context, message)) } // check for valid enum values // [oauth2] if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) errors = append(errors, compiler.NewError(context, message)) } } // string flow = 2; v2 := compiler.MapValueForKey(m, "flow") if v2 != nil { - x.Flow, ok = v2.(string) + x.Flow, ok = compiler.StringForScalarNode(v2) if !ok { - message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) + message := fmt.Sprintf("has unexpected value for flow: %s", compiler.Display(v2)) errors = append(errors, compiler.NewError(context, message)) } // check for valid enum values // [password] if ok && !compiler.StringArrayContainsValue([]string{"password"}, x.Flow) { - message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) + message := fmt.Sprintf("has unexpected value for flow: %s", compiler.Display(v2)) errors = append(errors, 
compiler.NewError(context, message)) } } @@ -3069,38 +2955,38 @@ func NewOauth2PasswordSecurity(in interface{}, context *compiler.Context) (*Oaut // string token_url = 4; v4 := compiler.MapValueForKey(m, "tokenUrl") if v4 != nil { - x.TokenUrl, ok = v4.(string) + x.TokenUrl, ok = compiler.StringForScalarNode(v4) if !ok { - message := fmt.Sprintf("has unexpected value for tokenUrl: %+v (%T)", v4, v4) + message := fmt.Sprintf("has unexpected value for tokenUrl: %s", compiler.Display(v4)) errors = append(errors, compiler.NewError(context, message)) } } // string description = 5; v5 := compiler.MapValueForKey(m, "description") if v5 != nil { - x.Description, ok = v5.(string) + x.Description, ok = compiler.StringForScalarNode(v5) if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v5, v5) + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v5)) errors = append(errors, compiler.NewError(context, message)) } } // repeated NamedAny vendor_extension = 6; // MAP: Any ^x- x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) if ok { - v := item.Value + v := m.Content[i+1] if strings.HasPrefix(k, "x-") { pair := &NamedAny{} pair.Name = k result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + handled, resultFromExt, err := compiler.CallExtension(context, v, k) if handled { if err != nil { errors = append(errors, err) } else { - bytes, _ := yaml.Marshal(v) + bytes := compiler.Marshal(v) result.Yaml = string(bytes) result.Value = resultFromExt pair.Value = result @@ -3120,7 +3006,7 @@ func NewOauth2PasswordSecurity(in interface{}, context *compiler.Context) (*Oaut } // NewOauth2Scopes creates an object of type Oauth2Scopes if possible, returning an error if not. -func NewOauth2Scopes(in interface{}, context *compiler.Context) (*Oauth2Scopes, error) { +func NewOauth2Scopes(in *yaml.Node, context *compiler.Context) (*Oauth2Scopes, error) { errors := make([]error, 0) x := &Oauth2Scopes{} m, ok := compiler.UnpackMap(in) @@ -3131,13 +3017,13 @@ func NewOauth2Scopes(in interface{}, context *compiler.Context) (*Oauth2Scopes, // repeated NamedString additional_properties = 1; // MAP: string x.AdditionalProperties = make([]*NamedString, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) if ok { - v := item.Value + v := m.Content[i+1] pair := &NamedString{} pair.Name = k - pair.Value = v.(string) + pair.Value, _ = compiler.StringForScalarNode(v) x.AdditionalProperties = append(x.AdditionalProperties, pair) } } @@ -3146,7 +3032,7 @@ func NewOauth2Scopes(in interface{}, context *compiler.Context) (*Oauth2Scopes, } // NewOperation creates an object of type Operation if possible, returning an error if not. 
-func NewOperation(in interface{}, context *compiler.Context) (*Operation, error) { +func NewOperation(in *yaml.Node, context *compiler.Context) (*Operation, error) { errors := make([]error, 0) x := &Operation{} m, ok := compiler.UnpackMap(in) @@ -3170,29 +3056,29 @@ func NewOperation(in interface{}, context *compiler.Context) (*Operation, error) // repeated string tags = 1; v1 := compiler.MapValueForKey(m, "tags") if v1 != nil { - v, ok := v1.([]interface{}) + v, ok := compiler.SequenceNodeForNode(v1) if ok { - x.Tags = compiler.ConvertInterfaceArrayToStringArray(v) + x.Tags = compiler.StringArrayForSequenceNode(v) } else { - message := fmt.Sprintf("has unexpected value for tags: %+v (%T)", v1, v1) + message := fmt.Sprintf("has unexpected value for tags: %s", compiler.Display(v1)) errors = append(errors, compiler.NewError(context, message)) } } // string summary = 2; v2 := compiler.MapValueForKey(m, "summary") if v2 != nil { - x.Summary, ok = v2.(string) + x.Summary, ok = compiler.StringForScalarNode(v2) if !ok { - message := fmt.Sprintf("has unexpected value for summary: %+v (%T)", v2, v2) + message := fmt.Sprintf("has unexpected value for summary: %s", compiler.Display(v2)) errors = append(errors, compiler.NewError(context, message)) } } // string description = 3; v3 := compiler.MapValueForKey(m, "description") if v3 != nil { - x.Description, ok = v3.(string) + x.Description, ok = compiler.StringForScalarNode(v3) if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v3)) errors = append(errors, compiler.NewError(context, message)) } } @@ -3208,31 +3094,31 @@ func NewOperation(in interface{}, context *compiler.Context) (*Operation, error) // string operation_id = 5; v5 := compiler.MapValueForKey(m, "operationId") if v5 != nil { - x.OperationId, ok = v5.(string) + x.OperationId, ok = compiler.StringForScalarNode(v5) if !ok { - message := fmt.Sprintf("has unexpected value for operationId: %+v (%T)", v5, v5) + message := fmt.Sprintf("has unexpected value for operationId: %s", compiler.Display(v5)) errors = append(errors, compiler.NewError(context, message)) } } // repeated string produces = 6; v6 := compiler.MapValueForKey(m, "produces") if v6 != nil { - v, ok := v6.([]interface{}) + v, ok := compiler.SequenceNodeForNode(v6) if ok { - x.Produces = compiler.ConvertInterfaceArrayToStringArray(v) + x.Produces = compiler.StringArrayForSequenceNode(v) } else { - message := fmt.Sprintf("has unexpected value for produces: %+v (%T)", v6, v6) + message := fmt.Sprintf("has unexpected value for produces: %s", compiler.Display(v6)) errors = append(errors, compiler.NewError(context, message)) } } // repeated string consumes = 7; v7 := compiler.MapValueForKey(m, "consumes") if v7 != nil { - v, ok := v7.([]interface{}) + v, ok := compiler.SequenceNodeForNode(v7) if ok { - x.Consumes = compiler.ConvertInterfaceArrayToStringArray(v) + x.Consumes = compiler.StringArrayForSequenceNode(v) } else { - message := fmt.Sprintf("has unexpected value for consumes: %+v (%T)", v7, v7) + message := fmt.Sprintf("has unexpected value for consumes: %s", compiler.Display(v7)) errors = append(errors, compiler.NewError(context, message)) } } @@ -3241,9 +3127,9 @@ func NewOperation(in interface{}, context *compiler.Context) (*Operation, error) if v8 != nil { // repeated ParametersItem x.Parameters = make([]*ParametersItem, 0) - a, ok := v8.([]interface{}) + a, ok := compiler.SequenceNodeForNode(v8) if ok 
{ - for _, item := range a { + for _, item := range a.Content { y, err := NewParametersItem(item, compiler.NewContext("parameters", context)) if err != nil { errors = append(errors, err) @@ -3264,26 +3150,26 @@ func NewOperation(in interface{}, context *compiler.Context) (*Operation, error) // repeated string schemes = 10; v10 := compiler.MapValueForKey(m, "schemes") if v10 != nil { - v, ok := v10.([]interface{}) + v, ok := compiler.SequenceNodeForNode(v10) if ok { - x.Schemes = compiler.ConvertInterfaceArrayToStringArray(v) + x.Schemes = compiler.StringArrayForSequenceNode(v) } else { - message := fmt.Sprintf("has unexpected value for schemes: %+v (%T)", v10, v10) + message := fmt.Sprintf("has unexpected value for schemes: %s", compiler.Display(v10)) errors = append(errors, compiler.NewError(context, message)) } // check for valid enum values // [http https ws wss] if ok && !compiler.StringArrayContainsValues([]string{"http", "https", "ws", "wss"}, x.Schemes) { - message := fmt.Sprintf("has unexpected value for schemes: %+v", v10) + message := fmt.Sprintf("has unexpected value for schemes: %s", compiler.Display(v10)) errors = append(errors, compiler.NewError(context, message)) } } // bool deprecated = 11; v11 := compiler.MapValueForKey(m, "deprecated") if v11 != nil { - x.Deprecated, ok = v11.(bool) + x.Deprecated, ok = compiler.BoolForScalarNode(v11) if !ok { - message := fmt.Sprintf("has unexpected value for deprecated: %+v (%T)", v11, v11) + message := fmt.Sprintf("has unexpected value for deprecated: %s", compiler.Display(v11)) errors = append(errors, compiler.NewError(context, message)) } } @@ -3292,9 +3178,9 @@ func NewOperation(in interface{}, context *compiler.Context) (*Operation, error) if v12 != nil { // repeated SecurityRequirement x.Security = make([]*SecurityRequirement, 0) - a, ok := v12.([]interface{}) + a, ok := compiler.SequenceNodeForNode(v12) if ok { - for _, item := range a { + for _, item := range a.Content { y, err := NewSecurityRequirement(item, compiler.NewContext("security", context)) if err != nil { errors = append(errors, err) @@ -3306,20 +3192,20 @@ func NewOperation(in interface{}, context *compiler.Context) (*Operation, error) // repeated NamedAny vendor_extension = 13; // MAP: Any ^x- x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) if ok { - v := item.Value + v := m.Content[i+1] if strings.HasPrefix(k, "x-") { pair := &NamedAny{} pair.Name = k result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + handled, resultFromExt, err := compiler.CallExtension(context, v, k) if handled { if err != nil { errors = append(errors, err) } else { - bytes, _ := yaml.Marshal(v) + bytes := compiler.Marshal(v) result.Yaml = string(bytes) result.Value = resultFromExt pair.Value = result @@ -3339,7 +3225,7 @@ func NewOperation(in interface{}, context *compiler.Context) (*Operation, error) } // NewParameter creates an object of type Parameter if possible, returning an error if not. 
-func NewParameter(in interface{}, context *compiler.Context) (*Parameter, error) { +func NewParameter(in *yaml.Node, context *compiler.Context) (*Parameter, error) { errors := make([]error, 0) x := &Parameter{} matched := false @@ -3379,7 +3265,7 @@ func NewParameter(in interface{}, context *compiler.Context) (*Parameter, error) } // NewParameterDefinitions creates an object of type ParameterDefinitions if possible, returning an error if not. -func NewParameterDefinitions(in interface{}, context *compiler.Context) (*ParameterDefinitions, error) { +func NewParameterDefinitions(in *yaml.Node, context *compiler.Context) (*ParameterDefinitions, error) { errors := make([]error, 0) x := &ParameterDefinitions{} m, ok := compiler.UnpackMap(in) @@ -3390,10 +3276,10 @@ func NewParameterDefinitions(in interface{}, context *compiler.Context) (*Parame // repeated NamedParameter additional_properties = 1; // MAP: Parameter x.AdditionalProperties = make([]*NamedParameter, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) if ok { - v := item.Value + v := m.Content[i+1] pair := &NamedParameter{} pair.Name = k var err error @@ -3409,7 +3295,7 @@ func NewParameterDefinitions(in interface{}, context *compiler.Context) (*Parame } // NewParametersItem creates an object of type ParametersItem if possible, returning an error if not. -func NewParametersItem(in interface{}, context *compiler.Context) (*ParametersItem, error) { +func NewParametersItem(in *yaml.Node, context *compiler.Context) (*ParametersItem, error) { errors := make([]error, 0) x := &ParametersItem{} matched := false @@ -3449,7 +3335,7 @@ func NewParametersItem(in interface{}, context *compiler.Context) (*ParametersIt } // NewPathItem creates an object of type PathItem if possible, returning an error if not. 
-func NewPathItem(in interface{}, context *compiler.Context) (*PathItem, error) { +func NewPathItem(in *yaml.Node, context *compiler.Context) (*PathItem, error) { errors := make([]error, 0) x := &PathItem{} m, ok := compiler.UnpackMap(in) @@ -3467,9 +3353,9 @@ func NewPathItem(in interface{}, context *compiler.Context) (*PathItem, error) { // string _ref = 1; v1 := compiler.MapValueForKey(m, "$ref") if v1 != nil { - x.XRef, ok = v1.(string) + x.XRef, ok = compiler.StringForScalarNode(v1) if !ok { - message := fmt.Sprintf("has unexpected value for $ref: %+v (%T)", v1, v1) + message := fmt.Sprintf("has unexpected value for $ref: %s", compiler.Display(v1)) errors = append(errors, compiler.NewError(context, message)) } } @@ -3541,9 +3427,9 @@ func NewPathItem(in interface{}, context *compiler.Context) (*PathItem, error) { if v9 != nil { // repeated ParametersItem x.Parameters = make([]*ParametersItem, 0) - a, ok := v9.([]interface{}) + a, ok := compiler.SequenceNodeForNode(v9) if ok { - for _, item := range a { + for _, item := range a.Content { y, err := NewParametersItem(item, compiler.NewContext("parameters", context)) if err != nil { errors = append(errors, err) @@ -3555,20 +3441,20 @@ func NewPathItem(in interface{}, context *compiler.Context) (*PathItem, error) { // repeated NamedAny vendor_extension = 10; // MAP: Any ^x- x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) if ok { - v := item.Value + v := m.Content[i+1] if strings.HasPrefix(k, "x-") { pair := &NamedAny{} pair.Name = k result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + handled, resultFromExt, err := compiler.CallExtension(context, v, k) if handled { if err != nil { errors = append(errors, err) } else { - bytes, _ := yaml.Marshal(v) + bytes := compiler.Marshal(v) result.Yaml = string(bytes) result.Value = resultFromExt pair.Value = result @@ -3588,7 +3474,7 @@ func NewPathItem(in interface{}, context *compiler.Context) (*PathItem, error) { } // NewPathParameterSubSchema creates an object of type PathParameterSubSchema if possible, returning an error if not. 
-func NewPathParameterSubSchema(in interface{}, context *compiler.Context) (*PathParameterSubSchema, error) { +func NewPathParameterSubSchema(in *yaml.Node, context *compiler.Context) (*PathParameterSubSchema, error) { errors := make([]error, 0) x := &PathParameterSubSchema{} m, ok := compiler.UnpackMap(in) @@ -3612,66 +3498,66 @@ func NewPathParameterSubSchema(in interface{}, context *compiler.Context) (*Path // bool required = 1; v1 := compiler.MapValueForKey(m, "required") if v1 != nil { - x.Required, ok = v1.(bool) + x.Required, ok = compiler.BoolForScalarNode(v1) if !ok { - message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v1, v1) + message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v1)) errors = append(errors, compiler.NewError(context, message)) } } // string in = 2; v2 := compiler.MapValueForKey(m, "in") if v2 != nil { - x.In, ok = v2.(string) + x.In, ok = compiler.StringForScalarNode(v2) if !ok { - message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) + message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v2)) errors = append(errors, compiler.NewError(context, message)) } // check for valid enum values // [path] if ok && !compiler.StringArrayContainsValue([]string{"path"}, x.In) { - message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) + message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v2)) errors = append(errors, compiler.NewError(context, message)) } } // string description = 3; v3 := compiler.MapValueForKey(m, "description") if v3 != nil { - x.Description, ok = v3.(string) + x.Description, ok = compiler.StringForScalarNode(v3) if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v3)) errors = append(errors, compiler.NewError(context, message)) } } // string name = 4; v4 := compiler.MapValueForKey(m, "name") if v4 != nil { - x.Name, ok = v4.(string) + x.Name, ok = compiler.StringForScalarNode(v4) if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v4, v4) + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v4)) errors = append(errors, compiler.NewError(context, message)) } } // string type = 5; v5 := compiler.MapValueForKey(m, "type") if v5 != nil { - x.Type, ok = v5.(string) + x.Type, ok = compiler.StringForScalarNode(v5) if !ok { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v5, v5) + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v5)) errors = append(errors, compiler.NewError(context, message)) } // check for valid enum values // [string number boolean integer array] if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v5, v5) + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v5)) errors = append(errors, compiler.NewError(context, message)) } } // string format = 6; v6 := compiler.MapValueForKey(m, "format") if v6 != nil { - x.Format, ok = v6.(string) + x.Format, ok = compiler.StringForScalarNode(v6) if !ok { - message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v6, v6) + message := fmt.Sprintf("has unexpected value for format: %s", compiler.Display(v6)) errors = append(errors, compiler.NewError(context, message)) } } @@ -3687,15 +3573,15 @@ func 
NewPathParameterSubSchema(in interface{}, context *compiler.Context) (*Path // string collection_format = 8; v8 := compiler.MapValueForKey(m, "collectionFormat") if v8 != nil { - x.CollectionFormat, ok = v8.(string) + x.CollectionFormat, ok = compiler.StringForScalarNode(v8) if !ok { - message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v8, v8) + message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v8)) errors = append(errors, compiler.NewError(context, message)) } // check for valid enum values // [csv ssv tsv pipes] if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) { - message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v8, v8) + message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v8)) errors = append(errors, compiler.NewError(context, message)) } } @@ -3711,126 +3597,102 @@ func NewPathParameterSubSchema(in interface{}, context *compiler.Context) (*Path // float maximum = 10; v10 := compiler.MapValueForKey(m, "maximum") if v10 != nil { - switch v10 := v10.(type) { - case float64: - x.Maximum = v10 - case float32: - x.Maximum = float64(v10) - case uint64: - x.Maximum = float64(v10) - case uint32: - x.Maximum = float64(v10) - case int64: - x.Maximum = float64(v10) - case int32: - x.Maximum = float64(v10) - case int: - x.Maximum = float64(v10) - default: - message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v10, v10) + v, ok := compiler.FloatForScalarNode(v10) + if ok { + x.Maximum = v + } else { + message := fmt.Sprintf("has unexpected value for maximum: %s", compiler.Display(v10)) errors = append(errors, compiler.NewError(context, message)) } } // bool exclusive_maximum = 11; v11 := compiler.MapValueForKey(m, "exclusiveMaximum") if v11 != nil { - x.ExclusiveMaximum, ok = v11.(bool) + x.ExclusiveMaximum, ok = compiler.BoolForScalarNode(v11) if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v11, v11) + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %s", compiler.Display(v11)) errors = append(errors, compiler.NewError(context, message)) } } // float minimum = 12; v12 := compiler.MapValueForKey(m, "minimum") if v12 != nil { - switch v12 := v12.(type) { - case float64: - x.Minimum = v12 - case float32: - x.Minimum = float64(v12) - case uint64: - x.Minimum = float64(v12) - case uint32: - x.Minimum = float64(v12) - case int64: - x.Minimum = float64(v12) - case int32: - x.Minimum = float64(v12) - case int: - x.Minimum = float64(v12) - default: - message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v12, v12) + v, ok := compiler.FloatForScalarNode(v12) + if ok { + x.Minimum = v + } else { + message := fmt.Sprintf("has unexpected value for minimum: %s", compiler.Display(v12)) errors = append(errors, compiler.NewError(context, message)) } } // bool exclusive_minimum = 13; v13 := compiler.MapValueForKey(m, "exclusiveMinimum") if v13 != nil { - x.ExclusiveMinimum, ok = v13.(bool) + x.ExclusiveMinimum, ok = compiler.BoolForScalarNode(v13) if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v13, v13) + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %s", compiler.Display(v13)) errors = append(errors, compiler.NewError(context, message)) } } // int64 max_length = 14; v14 := compiler.MapValueForKey(m, "maxLength") if v14 != nil { - t, ok := v14.(int) + t, ok := 
compiler.IntForScalarNode(v14) if ok { x.MaxLength = int64(t) } else { - message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v14, v14) + message := fmt.Sprintf("has unexpected value for maxLength: %s", compiler.Display(v14)) errors = append(errors, compiler.NewError(context, message)) } } // int64 min_length = 15; v15 := compiler.MapValueForKey(m, "minLength") if v15 != nil { - t, ok := v15.(int) + t, ok := compiler.IntForScalarNode(v15) if ok { x.MinLength = int64(t) } else { - message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v15, v15) + message := fmt.Sprintf("has unexpected value for minLength: %s", compiler.Display(v15)) errors = append(errors, compiler.NewError(context, message)) } } // string pattern = 16; v16 := compiler.MapValueForKey(m, "pattern") if v16 != nil { - x.Pattern, ok = v16.(string) + x.Pattern, ok = compiler.StringForScalarNode(v16) if !ok { - message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v16, v16) + message := fmt.Sprintf("has unexpected value for pattern: %s", compiler.Display(v16)) errors = append(errors, compiler.NewError(context, message)) } } // int64 max_items = 17; v17 := compiler.MapValueForKey(m, "maxItems") if v17 != nil { - t, ok := v17.(int) + t, ok := compiler.IntForScalarNode(v17) if ok { x.MaxItems = int64(t) } else { - message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v17, v17) + message := fmt.Sprintf("has unexpected value for maxItems: %s", compiler.Display(v17)) errors = append(errors, compiler.NewError(context, message)) } } // int64 min_items = 18; v18 := compiler.MapValueForKey(m, "minItems") if v18 != nil { - t, ok := v18.(int) + t, ok := compiler.IntForScalarNode(v18) if ok { x.MinItems = int64(t) } else { - message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v18, v18) + message := fmt.Sprintf("has unexpected value for minItems: %s", compiler.Display(v18)) errors = append(errors, compiler.NewError(context, message)) } } // bool unique_items = 19; v19 := compiler.MapValueForKey(m, "uniqueItems") if v19 != nil { - x.UniqueItems, ok = v19.(bool) + x.UniqueItems, ok = compiler.BoolForScalarNode(v19) if !ok { - message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v19, v19) + message := fmt.Sprintf("has unexpected value for uniqueItems: %s", compiler.Display(v19)) errors = append(errors, compiler.NewError(context, message)) } } @@ -3839,9 +3701,9 @@ func NewPathParameterSubSchema(in interface{}, context *compiler.Context) (*Path if v20 != nil { // repeated Any x.Enum = make([]*Any, 0) - a, ok := v20.([]interface{}) + a, ok := compiler.SequenceNodeForNode(v20) if ok { - for _, item := range a { + for _, item := range a.Content { y, err := NewAny(item, compiler.NewContext("enum", context)) if err != nil { errors = append(errors, err) @@ -3853,43 +3715,31 @@ func NewPathParameterSubSchema(in interface{}, context *compiler.Context) (*Path // float multiple_of = 21; v21 := compiler.MapValueForKey(m, "multipleOf") if v21 != nil { - switch v21 := v21.(type) { - case float64: - x.MultipleOf = v21 - case float32: - x.MultipleOf = float64(v21) - case uint64: - x.MultipleOf = float64(v21) - case uint32: - x.MultipleOf = float64(v21) - case int64: - x.MultipleOf = float64(v21) - case int32: - x.MultipleOf = float64(v21) - case int: - x.MultipleOf = float64(v21) - default: - message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v21, v21) + v, ok := compiler.FloatForScalarNode(v21) + if ok { + x.MultipleOf = v + } else { + 
message := fmt.Sprintf("has unexpected value for multipleOf: %s", compiler.Display(v21)) errors = append(errors, compiler.NewError(context, message)) } } // repeated NamedAny vendor_extension = 22; // MAP: Any ^x- x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) if ok { - v := item.Value + v := m.Content[i+1] if strings.HasPrefix(k, "x-") { pair := &NamedAny{} pair.Name = k result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + handled, resultFromExt, err := compiler.CallExtension(context, v, k) if handled { if err != nil { errors = append(errors, err) } else { - bytes, _ := yaml.Marshal(v) + bytes := compiler.Marshal(v) result.Yaml = string(bytes) result.Value = resultFromExt pair.Value = result @@ -3909,7 +3759,7 @@ func NewPathParameterSubSchema(in interface{}, context *compiler.Context) (*Path } // NewPaths creates an object of type Paths if possible, returning an error if not. -func NewPaths(in interface{}, context *compiler.Context) (*Paths, error) { +func NewPaths(in *yaml.Node, context *compiler.Context) (*Paths, error) { errors := make([]error, 0) x := &Paths{} m, ok := compiler.UnpackMap(in) @@ -3927,20 +3777,20 @@ func NewPaths(in interface{}, context *compiler.Context) (*Paths, error) { // repeated NamedAny vendor_extension = 1; // MAP: Any ^x- x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) if ok { - v := item.Value + v := m.Content[i+1] if strings.HasPrefix(k, "x-") { pair := &NamedAny{} pair.Name = k result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + handled, resultFromExt, err := compiler.CallExtension(context, v, k) if handled { if err != nil { errors = append(errors, err) } else { - bytes, _ := yaml.Marshal(v) + bytes := compiler.Marshal(v) result.Yaml = string(bytes) result.Value = resultFromExt pair.Value = result @@ -3958,10 +3808,10 @@ func NewPaths(in interface{}, context *compiler.Context) (*Paths, error) { // repeated NamedPathItem path = 2; // MAP: PathItem ^/ x.Path = make([]*NamedPathItem, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) if ok { - v := item.Value + v := m.Content[i+1] if strings.HasPrefix(k, "/") { pair := &NamedPathItem{} pair.Name = k @@ -3979,7 +3829,7 @@ func NewPaths(in interface{}, context *compiler.Context) (*Paths, error) { } // NewPrimitivesItems creates an object of type PrimitivesItems if possible, returning an error if not. 
-func NewPrimitivesItems(in interface{}, context *compiler.Context) (*PrimitivesItems, error) { +func NewPrimitivesItems(in *yaml.Node, context *compiler.Context) (*PrimitivesItems, error) { errors := make([]error, 0) x := &PrimitivesItems{} m, ok := compiler.UnpackMap(in) @@ -3997,24 +3847,24 @@ func NewPrimitivesItems(in interface{}, context *compiler.Context) (*PrimitivesI // string type = 1; v1 := compiler.MapValueForKey(m, "type") if v1 != nil { - x.Type, ok = v1.(string) + x.Type, ok = compiler.StringForScalarNode(v1) if !ok { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) errors = append(errors, compiler.NewError(context, message)) } // check for valid enum values // [string number integer boolean array] if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "integer", "boolean", "array"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) errors = append(errors, compiler.NewError(context, message)) } } // string format = 2; v2 := compiler.MapValueForKey(m, "format") if v2 != nil { - x.Format, ok = v2.(string) + x.Format, ok = compiler.StringForScalarNode(v2) if !ok { - message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v2, v2) + message := fmt.Sprintf("has unexpected value for format: %s", compiler.Display(v2)) errors = append(errors, compiler.NewError(context, message)) } } @@ -4030,15 +3880,15 @@ func NewPrimitivesItems(in interface{}, context *compiler.Context) (*PrimitivesI // string collection_format = 4; v4 := compiler.MapValueForKey(m, "collectionFormat") if v4 != nil { - x.CollectionFormat, ok = v4.(string) + x.CollectionFormat, ok = compiler.StringForScalarNode(v4) if !ok { - message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v4, v4) + message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v4)) errors = append(errors, compiler.NewError(context, message)) } // check for valid enum values // [csv ssv tsv pipes] if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) { - message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v4, v4) + message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v4)) errors = append(errors, compiler.NewError(context, message)) } } @@ -4054,126 +3904,102 @@ func NewPrimitivesItems(in interface{}, context *compiler.Context) (*PrimitivesI // float maximum = 6; v6 := compiler.MapValueForKey(m, "maximum") if v6 != nil { - switch v6 := v6.(type) { - case float64: - x.Maximum = v6 - case float32: - x.Maximum = float64(v6) - case uint64: - x.Maximum = float64(v6) - case uint32: - x.Maximum = float64(v6) - case int64: - x.Maximum = float64(v6) - case int32: - x.Maximum = float64(v6) - case int: - x.Maximum = float64(v6) - default: - message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v6, v6) + v, ok := compiler.FloatForScalarNode(v6) + if ok { + x.Maximum = v + } else { + message := fmt.Sprintf("has unexpected value for maximum: %s", compiler.Display(v6)) errors = append(errors, compiler.NewError(context, message)) } } // bool exclusive_maximum = 7; v7 := compiler.MapValueForKey(m, "exclusiveMaximum") if v7 != nil { - x.ExclusiveMaximum, ok = v7.(bool) + x.ExclusiveMaximum, ok = compiler.BoolForScalarNode(v7) if !ok 
{ - message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v7, v7) + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %s", compiler.Display(v7)) errors = append(errors, compiler.NewError(context, message)) } } // float minimum = 8; v8 := compiler.MapValueForKey(m, "minimum") if v8 != nil { - switch v8 := v8.(type) { - case float64: - x.Minimum = v8 - case float32: - x.Minimum = float64(v8) - case uint64: - x.Minimum = float64(v8) - case uint32: - x.Minimum = float64(v8) - case int64: - x.Minimum = float64(v8) - case int32: - x.Minimum = float64(v8) - case int: - x.Minimum = float64(v8) - default: - message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v8, v8) + v, ok := compiler.FloatForScalarNode(v8) + if ok { + x.Minimum = v + } else { + message := fmt.Sprintf("has unexpected value for minimum: %s", compiler.Display(v8)) errors = append(errors, compiler.NewError(context, message)) } } // bool exclusive_minimum = 9; v9 := compiler.MapValueForKey(m, "exclusiveMinimum") if v9 != nil { - x.ExclusiveMinimum, ok = v9.(bool) + x.ExclusiveMinimum, ok = compiler.BoolForScalarNode(v9) if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v9, v9) + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %s", compiler.Display(v9)) errors = append(errors, compiler.NewError(context, message)) } } // int64 max_length = 10; v10 := compiler.MapValueForKey(m, "maxLength") if v10 != nil { - t, ok := v10.(int) + t, ok := compiler.IntForScalarNode(v10) if ok { x.MaxLength = int64(t) } else { - message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v10, v10) + message := fmt.Sprintf("has unexpected value for maxLength: %s", compiler.Display(v10)) errors = append(errors, compiler.NewError(context, message)) } } // int64 min_length = 11; v11 := compiler.MapValueForKey(m, "minLength") if v11 != nil { - t, ok := v11.(int) + t, ok := compiler.IntForScalarNode(v11) if ok { x.MinLength = int64(t) } else { - message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v11, v11) + message := fmt.Sprintf("has unexpected value for minLength: %s", compiler.Display(v11)) errors = append(errors, compiler.NewError(context, message)) } } // string pattern = 12; v12 := compiler.MapValueForKey(m, "pattern") if v12 != nil { - x.Pattern, ok = v12.(string) + x.Pattern, ok = compiler.StringForScalarNode(v12) if !ok { - message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v12, v12) + message := fmt.Sprintf("has unexpected value for pattern: %s", compiler.Display(v12)) errors = append(errors, compiler.NewError(context, message)) } } // int64 max_items = 13; v13 := compiler.MapValueForKey(m, "maxItems") if v13 != nil { - t, ok := v13.(int) + t, ok := compiler.IntForScalarNode(v13) if ok { x.MaxItems = int64(t) } else { - message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v13, v13) + message := fmt.Sprintf("has unexpected value for maxItems: %s", compiler.Display(v13)) errors = append(errors, compiler.NewError(context, message)) } } // int64 min_items = 14; v14 := compiler.MapValueForKey(m, "minItems") if v14 != nil { - t, ok := v14.(int) + t, ok := compiler.IntForScalarNode(v14) if ok { x.MinItems = int64(t) } else { - message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v14, v14) + message := fmt.Sprintf("has unexpected value for minItems: %s", compiler.Display(v14)) errors = append(errors, compiler.NewError(context, message)) } } // bool unique_items = 
15; v15 := compiler.MapValueForKey(m, "uniqueItems") if v15 != nil { - x.UniqueItems, ok = v15.(bool) + x.UniqueItems, ok = compiler.BoolForScalarNode(v15) if !ok { - message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v15, v15) + message := fmt.Sprintf("has unexpected value for uniqueItems: %s", compiler.Display(v15)) errors = append(errors, compiler.NewError(context, message)) } } @@ -4182,9 +4008,9 @@ func NewPrimitivesItems(in interface{}, context *compiler.Context) (*PrimitivesI if v16 != nil { // repeated Any x.Enum = make([]*Any, 0) - a, ok := v16.([]interface{}) + a, ok := compiler.SequenceNodeForNode(v16) if ok { - for _, item := range a { + for _, item := range a.Content { y, err := NewAny(item, compiler.NewContext("enum", context)) if err != nil { errors = append(errors, err) @@ -4196,43 +4022,31 @@ func NewPrimitivesItems(in interface{}, context *compiler.Context) (*PrimitivesI // float multiple_of = 17; v17 := compiler.MapValueForKey(m, "multipleOf") if v17 != nil { - switch v17 := v17.(type) { - case float64: - x.MultipleOf = v17 - case float32: - x.MultipleOf = float64(v17) - case uint64: - x.MultipleOf = float64(v17) - case uint32: - x.MultipleOf = float64(v17) - case int64: - x.MultipleOf = float64(v17) - case int32: - x.MultipleOf = float64(v17) - case int: - x.MultipleOf = float64(v17) - default: - message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v17, v17) + v, ok := compiler.FloatForScalarNode(v17) + if ok { + x.MultipleOf = v + } else { + message := fmt.Sprintf("has unexpected value for multipleOf: %s", compiler.Display(v17)) errors = append(errors, compiler.NewError(context, message)) } } // repeated NamedAny vendor_extension = 18; // MAP: Any ^x- x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) if ok { - v := item.Value + v := m.Content[i+1] if strings.HasPrefix(k, "x-") { pair := &NamedAny{} pair.Name = k result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + handled, resultFromExt, err := compiler.CallExtension(context, v, k) if handled { if err != nil { errors = append(errors, err) } else { - bytes, _ := yaml.Marshal(v) + bytes := compiler.Marshal(v) result.Yaml = string(bytes) result.Value = resultFromExt pair.Value = result @@ -4252,7 +4066,7 @@ func NewPrimitivesItems(in interface{}, context *compiler.Context) (*PrimitivesI } // NewProperties creates an object of type Properties if possible, returning an error if not. 
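The hunks above replace direct type assertions (v1.(string), the long numeric type switches) with scalar-node helpers such as compiler.StringForScalarNode and compiler.FloatForScalarNode, and route error messages through compiler.Display. A minimal standalone sketch, assuming gopkg.in/yaml.v3 and using illustrative helper names (not the gnostic compiler package itself), of how such scalar coercion from a *yaml.Node can work:

// scalar_sketch.go - illustrative only; the real compiler helpers may differ.
package main

import (
	"fmt"
	"strconv"

	yaml "gopkg.in/yaml.v3"
)

// stringForScalarNode is a hypothetical stand-in for compiler.StringForScalarNode.
func stringForScalarNode(n *yaml.Node) (string, bool) {
	if n == nil || n.Kind != yaml.ScalarNode {
		return "", false
	}
	return n.Value, true
}

// floatForScalarNode is a hypothetical stand-in for compiler.FloatForScalarNode.
func floatForScalarNode(n *yaml.Node) (float64, bool) {
	if n == nil || n.Kind != yaml.ScalarNode {
		return 0, false
	}
	f, err := strconv.ParseFloat(n.Value, 64)
	return f, err == nil
}

func main() {
	var doc yaml.Node
	if err := yaml.Unmarshal([]byte("maximum: 10.5"), &doc); err != nil {
		panic(err)
	}
	// doc is a DocumentNode; Content[0] is the mapping, whose Content holds
	// alternating key and value nodes.
	m := doc.Content[0]
	key, _ := stringForScalarNode(m.Content[0])
	val, ok := floatForScalarNode(m.Content[1])
	fmt.Println(key, val, ok) // maximum 10.5 true
}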
-func NewProperties(in interface{}, context *compiler.Context) (*Properties, error) { +func NewProperties(in *yaml.Node, context *compiler.Context) (*Properties, error) { errors := make([]error, 0) x := &Properties{} m, ok := compiler.UnpackMap(in) @@ -4263,10 +4077,10 @@ func NewProperties(in interface{}, context *compiler.Context) (*Properties, erro // repeated NamedSchema additional_properties = 1; // MAP: Schema x.AdditionalProperties = make([]*NamedSchema, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) if ok { - v := item.Value + v := m.Content[i+1] pair := &NamedSchema{} pair.Name = k var err error @@ -4282,7 +4096,7 @@ func NewProperties(in interface{}, context *compiler.Context) (*Properties, erro } // NewQueryParameterSubSchema creates an object of type QueryParameterSubSchema if possible, returning an error if not. -func NewQueryParameterSubSchema(in interface{}, context *compiler.Context) (*QueryParameterSubSchema, error) { +func NewQueryParameterSubSchema(in *yaml.Node, context *compiler.Context) (*QueryParameterSubSchema, error) { errors := make([]error, 0) x := &QueryParameterSubSchema{} m, ok := compiler.UnpackMap(in) @@ -4300,75 +4114,75 @@ func NewQueryParameterSubSchema(in interface{}, context *compiler.Context) (*Que // bool required = 1; v1 := compiler.MapValueForKey(m, "required") if v1 != nil { - x.Required, ok = v1.(bool) + x.Required, ok = compiler.BoolForScalarNode(v1) if !ok { - message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v1, v1) + message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v1)) errors = append(errors, compiler.NewError(context, message)) } } // string in = 2; v2 := compiler.MapValueForKey(m, "in") if v2 != nil { - x.In, ok = v2.(string) + x.In, ok = compiler.StringForScalarNode(v2) if !ok { - message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) + message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v2)) errors = append(errors, compiler.NewError(context, message)) } // check for valid enum values // [query] if ok && !compiler.StringArrayContainsValue([]string{"query"}, x.In) { - message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) + message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v2)) errors = append(errors, compiler.NewError(context, message)) } } // string description = 3; v3 := compiler.MapValueForKey(m, "description") if v3 != nil { - x.Description, ok = v3.(string) + x.Description, ok = compiler.StringForScalarNode(v3) if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v3)) errors = append(errors, compiler.NewError(context, message)) } } // string name = 4; v4 := compiler.MapValueForKey(m, "name") if v4 != nil { - x.Name, ok = v4.(string) + x.Name, ok = compiler.StringForScalarNode(v4) if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v4, v4) + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v4)) errors = append(errors, compiler.NewError(context, message)) } } // bool allow_empty_value = 5; v5 := compiler.MapValueForKey(m, "allowEmptyValue") if v5 != nil { - x.AllowEmptyValue, ok = v5.(bool) + x.AllowEmptyValue, ok = compiler.BoolForScalarNode(v5) if !ok { - message := fmt.Sprintf("has unexpected value for 
allowEmptyValue: %+v (%T)", v5, v5) + message := fmt.Sprintf("has unexpected value for allowEmptyValue: %s", compiler.Display(v5)) errors = append(errors, compiler.NewError(context, message)) } } // string type = 6; v6 := compiler.MapValueForKey(m, "type") if v6 != nil { - x.Type, ok = v6.(string) + x.Type, ok = compiler.StringForScalarNode(v6) if !ok { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v6)) errors = append(errors, compiler.NewError(context, message)) } // check for valid enum values // [string number boolean integer array] if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v6)) errors = append(errors, compiler.NewError(context, message)) } } // string format = 7; v7 := compiler.MapValueForKey(m, "format") if v7 != nil { - x.Format, ok = v7.(string) + x.Format, ok = compiler.StringForScalarNode(v7) if !ok { - message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v7, v7) + message := fmt.Sprintf("has unexpected value for format: %s", compiler.Display(v7)) errors = append(errors, compiler.NewError(context, message)) } } @@ -4384,15 +4198,15 @@ func NewQueryParameterSubSchema(in interface{}, context *compiler.Context) (*Que // string collection_format = 9; v9 := compiler.MapValueForKey(m, "collectionFormat") if v9 != nil { - x.CollectionFormat, ok = v9.(string) + x.CollectionFormat, ok = compiler.StringForScalarNode(v9) if !ok { - message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v9, v9) + message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v9)) errors = append(errors, compiler.NewError(context, message)) } // check for valid enum values // [csv ssv tsv pipes multi] if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes", "multi"}, x.CollectionFormat) { - message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v9, v9) + message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v9)) errors = append(errors, compiler.NewError(context, message)) } } @@ -4408,126 +4222,102 @@ func NewQueryParameterSubSchema(in interface{}, context *compiler.Context) (*Que // float maximum = 11; v11 := compiler.MapValueForKey(m, "maximum") if v11 != nil { - switch v11 := v11.(type) { - case float64: - x.Maximum = v11 - case float32: - x.Maximum = float64(v11) - case uint64: - x.Maximum = float64(v11) - case uint32: - x.Maximum = float64(v11) - case int64: - x.Maximum = float64(v11) - case int32: - x.Maximum = float64(v11) - case int: - x.Maximum = float64(v11) - default: - message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v11, v11) + v, ok := compiler.FloatForScalarNode(v11) + if ok { + x.Maximum = v + } else { + message := fmt.Sprintf("has unexpected value for maximum: %s", compiler.Display(v11)) errors = append(errors, compiler.NewError(context, message)) } } // bool exclusive_maximum = 12; v12 := compiler.MapValueForKey(m, "exclusiveMaximum") if v12 != nil { - x.ExclusiveMaximum, ok = v12.(bool) + x.ExclusiveMaximum, ok = compiler.BoolForScalarNode(v12) if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v12, v12) + message := fmt.Sprintf("has unexpected value for 
exclusiveMaximum: %s", compiler.Display(v12)) errors = append(errors, compiler.NewError(context, message)) } } // float minimum = 13; v13 := compiler.MapValueForKey(m, "minimum") if v13 != nil { - switch v13 := v13.(type) { - case float64: - x.Minimum = v13 - case float32: - x.Minimum = float64(v13) - case uint64: - x.Minimum = float64(v13) - case uint32: - x.Minimum = float64(v13) - case int64: - x.Minimum = float64(v13) - case int32: - x.Minimum = float64(v13) - case int: - x.Minimum = float64(v13) - default: - message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v13, v13) + v, ok := compiler.FloatForScalarNode(v13) + if ok { + x.Minimum = v + } else { + message := fmt.Sprintf("has unexpected value for minimum: %s", compiler.Display(v13)) errors = append(errors, compiler.NewError(context, message)) } } // bool exclusive_minimum = 14; v14 := compiler.MapValueForKey(m, "exclusiveMinimum") if v14 != nil { - x.ExclusiveMinimum, ok = v14.(bool) + x.ExclusiveMinimum, ok = compiler.BoolForScalarNode(v14) if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v14, v14) + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %s", compiler.Display(v14)) errors = append(errors, compiler.NewError(context, message)) } } // int64 max_length = 15; v15 := compiler.MapValueForKey(m, "maxLength") if v15 != nil { - t, ok := v15.(int) + t, ok := compiler.IntForScalarNode(v15) if ok { x.MaxLength = int64(t) } else { - message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v15, v15) + message := fmt.Sprintf("has unexpected value for maxLength: %s", compiler.Display(v15)) errors = append(errors, compiler.NewError(context, message)) } } // int64 min_length = 16; v16 := compiler.MapValueForKey(m, "minLength") if v16 != nil { - t, ok := v16.(int) + t, ok := compiler.IntForScalarNode(v16) if ok { x.MinLength = int64(t) } else { - message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v16, v16) + message := fmt.Sprintf("has unexpected value for minLength: %s", compiler.Display(v16)) errors = append(errors, compiler.NewError(context, message)) } } // string pattern = 17; v17 := compiler.MapValueForKey(m, "pattern") if v17 != nil { - x.Pattern, ok = v17.(string) + x.Pattern, ok = compiler.StringForScalarNode(v17) if !ok { - message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v17, v17) + message := fmt.Sprintf("has unexpected value for pattern: %s", compiler.Display(v17)) errors = append(errors, compiler.NewError(context, message)) } } // int64 max_items = 18; v18 := compiler.MapValueForKey(m, "maxItems") if v18 != nil { - t, ok := v18.(int) + t, ok := compiler.IntForScalarNode(v18) if ok { x.MaxItems = int64(t) } else { - message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v18, v18) + message := fmt.Sprintf("has unexpected value for maxItems: %s", compiler.Display(v18)) errors = append(errors, compiler.NewError(context, message)) } } // int64 min_items = 19; v19 := compiler.MapValueForKey(m, "minItems") if v19 != nil { - t, ok := v19.(int) + t, ok := compiler.IntForScalarNode(v19) if ok { x.MinItems = int64(t) } else { - message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v19, v19) + message := fmt.Sprintf("has unexpected value for minItems: %s", compiler.Display(v19)) errors = append(errors, compiler.NewError(context, message)) } } // bool unique_items = 20; v20 := compiler.MapValueForKey(m, "uniqueItems") if v20 != nil { - x.UniqueItems, ok = v20.(bool) + x.UniqueItems, 
ok = compiler.BoolForScalarNode(v20) if !ok { - message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v20, v20) + message := fmt.Sprintf("has unexpected value for uniqueItems: %s", compiler.Display(v20)) errors = append(errors, compiler.NewError(context, message)) } } @@ -4536,9 +4326,9 @@ func NewQueryParameterSubSchema(in interface{}, context *compiler.Context) (*Que if v21 != nil { // repeated Any x.Enum = make([]*Any, 0) - a, ok := v21.([]interface{}) + a, ok := compiler.SequenceNodeForNode(v21) if ok { - for _, item := range a { + for _, item := range a.Content { y, err := NewAny(item, compiler.NewContext("enum", context)) if err != nil { errors = append(errors, err) @@ -4550,43 +4340,31 @@ func NewQueryParameterSubSchema(in interface{}, context *compiler.Context) (*Que // float multiple_of = 22; v22 := compiler.MapValueForKey(m, "multipleOf") if v22 != nil { - switch v22 := v22.(type) { - case float64: - x.MultipleOf = v22 - case float32: - x.MultipleOf = float64(v22) - case uint64: - x.MultipleOf = float64(v22) - case uint32: - x.MultipleOf = float64(v22) - case int64: - x.MultipleOf = float64(v22) - case int32: - x.MultipleOf = float64(v22) - case int: - x.MultipleOf = float64(v22) - default: - message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v22, v22) + v, ok := compiler.FloatForScalarNode(v22) + if ok { + x.MultipleOf = v + } else { + message := fmt.Sprintf("has unexpected value for multipleOf: %s", compiler.Display(v22)) errors = append(errors, compiler.NewError(context, message)) } } // repeated NamedAny vendor_extension = 23; // MAP: Any ^x- x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) if ok { - v := item.Value + v := m.Content[i+1] if strings.HasPrefix(k, "x-") { pair := &NamedAny{} pair.Name = k result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + handled, resultFromExt, err := compiler.CallExtension(context, v, k) if handled { if err != nil { errors = append(errors, err) } else { - bytes, _ := yaml.Marshal(v) + bytes := compiler.Marshal(v) result.Yaml = string(bytes) result.Value = resultFromExt pair.Value = result @@ -4606,7 +4384,7 @@ func NewQueryParameterSubSchema(in interface{}, context *compiler.Context) (*Que } // NewResponse creates an object of type Response if possible, returning an error if not. 
-func NewResponse(in interface{}, context *compiler.Context) (*Response, error) { +func NewResponse(in *yaml.Node, context *compiler.Context) (*Response, error) { errors := make([]error, 0) x := &Response{} m, ok := compiler.UnpackMap(in) @@ -4630,9 +4408,9 @@ func NewResponse(in interface{}, context *compiler.Context) (*Response, error) { // string description = 1; v1 := compiler.MapValueForKey(m, "description") if v1 != nil { - x.Description, ok = v1.(string) + x.Description, ok = compiler.StringForScalarNode(v1) if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v1, v1) + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v1)) errors = append(errors, compiler.NewError(context, message)) } } @@ -4666,20 +4444,20 @@ func NewResponse(in interface{}, context *compiler.Context) (*Response, error) { // repeated NamedAny vendor_extension = 5; // MAP: Any ^x- x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) if ok { - v := item.Value + v := m.Content[i+1] if strings.HasPrefix(k, "x-") { pair := &NamedAny{} pair.Name = k result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + handled, resultFromExt, err := compiler.CallExtension(context, v, k) if handled { if err != nil { errors = append(errors, err) } else { - bytes, _ := yaml.Marshal(v) + bytes := compiler.Marshal(v) result.Yaml = string(bytes) result.Value = resultFromExt pair.Value = result @@ -4699,7 +4477,7 @@ func NewResponse(in interface{}, context *compiler.Context) (*Response, error) { } // NewResponseDefinitions creates an object of type ResponseDefinitions if possible, returning an error if not. -func NewResponseDefinitions(in interface{}, context *compiler.Context) (*ResponseDefinitions, error) { +func NewResponseDefinitions(in *yaml.Node, context *compiler.Context) (*ResponseDefinitions, error) { errors := make([]error, 0) x := &ResponseDefinitions{} m, ok := compiler.UnpackMap(in) @@ -4710,10 +4488,10 @@ func NewResponseDefinitions(in interface{}, context *compiler.Context) (*Respons // repeated NamedResponse additional_properties = 1; // MAP: Response x.AdditionalProperties = make([]*NamedResponse, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) if ok { - v := item.Value + v := m.Content[i+1] pair := &NamedResponse{} pair.Name = k var err error @@ -4729,7 +4507,7 @@ func NewResponseDefinitions(in interface{}, context *compiler.Context) (*Respons } // NewResponseValue creates an object of type ResponseValue if possible, returning an error if not. -func NewResponseValue(in interface{}, context *compiler.Context) (*ResponseValue, error) { +func NewResponseValue(in *yaml.Node, context *compiler.Context) (*ResponseValue, error) { errors := make([]error, 0) x := &ResponseValue{} matched := false @@ -4769,7 +4547,7 @@ func NewResponseValue(in interface{}, context *compiler.Context) (*ResponseValue } // NewResponses creates an object of type Responses if possible, returning an error if not. 
-func NewResponses(in interface{}, context *compiler.Context) (*Responses, error) { +func NewResponses(in *yaml.Node, context *compiler.Context) (*Responses, error) { errors := make([]error, 0) x := &Responses{} m, ok := compiler.UnpackMap(in) @@ -4787,10 +4565,10 @@ func NewResponses(in interface{}, context *compiler.Context) (*Responses, error) // repeated NamedResponseValue response_code = 1; // MAP: ResponseValue ^([0-9]{3})$|^(default)$ x.ResponseCode = make([]*NamedResponseValue, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) if ok { - v := item.Value + v := m.Content[i+1] if pattern2.MatchString(k) { pair := &NamedResponseValue{} pair.Name = k @@ -4806,20 +4584,20 @@ func NewResponses(in interface{}, context *compiler.Context) (*Responses, error) // repeated NamedAny vendor_extension = 2; // MAP: Any ^x- x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) if ok { - v := item.Value + v := m.Content[i+1] if strings.HasPrefix(k, "x-") { pair := &NamedAny{} pair.Name = k result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + handled, resultFromExt, err := compiler.CallExtension(context, v, k) if handled { if err != nil { errors = append(errors, err) } else { - bytes, _ := yaml.Marshal(v) + bytes := compiler.Marshal(v) result.Yaml = string(bytes) result.Value = resultFromExt pair.Value = result @@ -4839,7 +4617,7 @@ func NewResponses(in interface{}, context *compiler.Context) (*Responses, error) } // NewSchema creates an object of type Schema if possible, returning an error if not. 
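The vendor-extension and additional-properties hunks above switch from ranging over a yaml.v2-style map to indexing m.Content in key/value pairs, because a yaml.v3 MappingNode stores keys and values as alternating entries in Content. A minimal sketch, assuming gopkg.in/yaml.v3, of that iteration pattern for keys matching ^x-:

// extensions_sketch.go - illustrative only.
package main

import (
	"fmt"
	"strings"

	yaml "gopkg.in/yaml.v3"
)

func main() {
	src := []byte("name: pet\nx-internal-id: 42\n")
	var doc yaml.Node
	if err := yaml.Unmarshal(src, &doc); err != nil {
		panic(err)
	}
	m := doc.Content[0] // the mapping node under the document node
	for i := 0; i+1 < len(m.Content); i += 2 {
		k, v := m.Content[i], m.Content[i+1]
		if strings.HasPrefix(k.Value, "x-") {
			fmt.Printf("extension %s = %s\n", k.Value, v.Value) // extension x-internal-id = 42
		}
	}
}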
-func NewSchema(in interface{}, context *compiler.Context) (*Schema, error) { +func NewSchema(in *yaml.Node, context *compiler.Context) (*Schema, error) { errors := make([]error, 0) x := &Schema{} m, ok := compiler.UnpackMap(in) @@ -4857,36 +4635,36 @@ func NewSchema(in interface{}, context *compiler.Context) (*Schema, error) { // string _ref = 1; v1 := compiler.MapValueForKey(m, "$ref") if v1 != nil { - x.XRef, ok = v1.(string) + x.XRef, ok = compiler.StringForScalarNode(v1) if !ok { - message := fmt.Sprintf("has unexpected value for $ref: %+v (%T)", v1, v1) + message := fmt.Sprintf("has unexpected value for $ref: %s", compiler.Display(v1)) errors = append(errors, compiler.NewError(context, message)) } } // string format = 2; v2 := compiler.MapValueForKey(m, "format") if v2 != nil { - x.Format, ok = v2.(string) + x.Format, ok = compiler.StringForScalarNode(v2) if !ok { - message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v2, v2) + message := fmt.Sprintf("has unexpected value for format: %s", compiler.Display(v2)) errors = append(errors, compiler.NewError(context, message)) } } // string title = 3; v3 := compiler.MapValueForKey(m, "title") if v3 != nil { - x.Title, ok = v3.(string) + x.Title, ok = compiler.StringForScalarNode(v3) if !ok { - message := fmt.Sprintf("has unexpected value for title: %+v (%T)", v3, v3) + message := fmt.Sprintf("has unexpected value for title: %s", compiler.Display(v3)) errors = append(errors, compiler.NewError(context, message)) } } // string description = 4; v4 := compiler.MapValueForKey(m, "description") if v4 != nil { - x.Description, ok = v4.(string) + x.Description, ok = compiler.StringForScalarNode(v4) if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v4, v4) + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v4)) errors = append(errors, compiler.NewError(context, message)) } } @@ -4902,182 +4680,146 @@ func NewSchema(in interface{}, context *compiler.Context) (*Schema, error) { // float multiple_of = 6; v6 := compiler.MapValueForKey(m, "multipleOf") if v6 != nil { - switch v6 := v6.(type) { - case float64: - x.MultipleOf = v6 - case float32: - x.MultipleOf = float64(v6) - case uint64: - x.MultipleOf = float64(v6) - case uint32: - x.MultipleOf = float64(v6) - case int64: - x.MultipleOf = float64(v6) - case int32: - x.MultipleOf = float64(v6) - case int: - x.MultipleOf = float64(v6) - default: - message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v6, v6) + v, ok := compiler.FloatForScalarNode(v6) + if ok { + x.MultipleOf = v + } else { + message := fmt.Sprintf("has unexpected value for multipleOf: %s", compiler.Display(v6)) errors = append(errors, compiler.NewError(context, message)) } } // float maximum = 7; v7 := compiler.MapValueForKey(m, "maximum") if v7 != nil { - switch v7 := v7.(type) { - case float64: - x.Maximum = v7 - case float32: - x.Maximum = float64(v7) - case uint64: - x.Maximum = float64(v7) - case uint32: - x.Maximum = float64(v7) - case int64: - x.Maximum = float64(v7) - case int32: - x.Maximum = float64(v7) - case int: - x.Maximum = float64(v7) - default: - message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v7, v7) + v, ok := compiler.FloatForScalarNode(v7) + if ok { + x.Maximum = v + } else { + message := fmt.Sprintf("has unexpected value for maximum: %s", compiler.Display(v7)) errors = append(errors, compiler.NewError(context, message)) } } // bool exclusive_maximum = 8; v8 := compiler.MapValueForKey(m, 
"exclusiveMaximum") if v8 != nil { - x.ExclusiveMaximum, ok = v8.(bool) + x.ExclusiveMaximum, ok = compiler.BoolForScalarNode(v8) if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v8, v8) + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %s", compiler.Display(v8)) errors = append(errors, compiler.NewError(context, message)) } } // float minimum = 9; v9 := compiler.MapValueForKey(m, "minimum") if v9 != nil { - switch v9 := v9.(type) { - case float64: - x.Minimum = v9 - case float32: - x.Minimum = float64(v9) - case uint64: - x.Minimum = float64(v9) - case uint32: - x.Minimum = float64(v9) - case int64: - x.Minimum = float64(v9) - case int32: - x.Minimum = float64(v9) - case int: - x.Minimum = float64(v9) - default: - message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v9, v9) + v, ok := compiler.FloatForScalarNode(v9) + if ok { + x.Minimum = v + } else { + message := fmt.Sprintf("has unexpected value for minimum: %s", compiler.Display(v9)) errors = append(errors, compiler.NewError(context, message)) } } // bool exclusive_minimum = 10; v10 := compiler.MapValueForKey(m, "exclusiveMinimum") if v10 != nil { - x.ExclusiveMinimum, ok = v10.(bool) + x.ExclusiveMinimum, ok = compiler.BoolForScalarNode(v10) if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v10, v10) + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %s", compiler.Display(v10)) errors = append(errors, compiler.NewError(context, message)) } } // int64 max_length = 11; v11 := compiler.MapValueForKey(m, "maxLength") if v11 != nil { - t, ok := v11.(int) + t, ok := compiler.IntForScalarNode(v11) if ok { x.MaxLength = int64(t) } else { - message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v11, v11) + message := fmt.Sprintf("has unexpected value for maxLength: %s", compiler.Display(v11)) errors = append(errors, compiler.NewError(context, message)) } } // int64 min_length = 12; v12 := compiler.MapValueForKey(m, "minLength") if v12 != nil { - t, ok := v12.(int) + t, ok := compiler.IntForScalarNode(v12) if ok { x.MinLength = int64(t) } else { - message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v12, v12) + message := fmt.Sprintf("has unexpected value for minLength: %s", compiler.Display(v12)) errors = append(errors, compiler.NewError(context, message)) } } // string pattern = 13; v13 := compiler.MapValueForKey(m, "pattern") if v13 != nil { - x.Pattern, ok = v13.(string) + x.Pattern, ok = compiler.StringForScalarNode(v13) if !ok { - message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v13, v13) + message := fmt.Sprintf("has unexpected value for pattern: %s", compiler.Display(v13)) errors = append(errors, compiler.NewError(context, message)) } } // int64 max_items = 14; v14 := compiler.MapValueForKey(m, "maxItems") if v14 != nil { - t, ok := v14.(int) + t, ok := compiler.IntForScalarNode(v14) if ok { x.MaxItems = int64(t) } else { - message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v14, v14) + message := fmt.Sprintf("has unexpected value for maxItems: %s", compiler.Display(v14)) errors = append(errors, compiler.NewError(context, message)) } } // int64 min_items = 15; v15 := compiler.MapValueForKey(m, "minItems") if v15 != nil { - t, ok := v15.(int) + t, ok := compiler.IntForScalarNode(v15) if ok { x.MinItems = int64(t) } else { - message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v15, v15) + message := fmt.Sprintf("has 
unexpected value for minItems: %s", compiler.Display(v15)) errors = append(errors, compiler.NewError(context, message)) } } // bool unique_items = 16; v16 := compiler.MapValueForKey(m, "uniqueItems") if v16 != nil { - x.UniqueItems, ok = v16.(bool) + x.UniqueItems, ok = compiler.BoolForScalarNode(v16) if !ok { - message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v16, v16) + message := fmt.Sprintf("has unexpected value for uniqueItems: %s", compiler.Display(v16)) errors = append(errors, compiler.NewError(context, message)) } } // int64 max_properties = 17; v17 := compiler.MapValueForKey(m, "maxProperties") if v17 != nil { - t, ok := v17.(int) + t, ok := compiler.IntForScalarNode(v17) if ok { x.MaxProperties = int64(t) } else { - message := fmt.Sprintf("has unexpected value for maxProperties: %+v (%T)", v17, v17) + message := fmt.Sprintf("has unexpected value for maxProperties: %s", compiler.Display(v17)) errors = append(errors, compiler.NewError(context, message)) } } // int64 min_properties = 18; v18 := compiler.MapValueForKey(m, "minProperties") if v18 != nil { - t, ok := v18.(int) + t, ok := compiler.IntForScalarNode(v18) if ok { x.MinProperties = int64(t) } else { - message := fmt.Sprintf("has unexpected value for minProperties: %+v (%T)", v18, v18) + message := fmt.Sprintf("has unexpected value for minProperties: %s", compiler.Display(v18)) errors = append(errors, compiler.NewError(context, message)) } } // repeated string required = 19; v19 := compiler.MapValueForKey(m, "required") if v19 != nil { - v, ok := v19.([]interface{}) + v, ok := compiler.SequenceNodeForNode(v19) if ok { - x.Required = compiler.ConvertInterfaceArrayToStringArray(v) + x.Required = compiler.StringArrayForSequenceNode(v) } else { - message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v19, v19) + message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v19)) errors = append(errors, compiler.NewError(context, message)) } } @@ -5086,9 +4828,9 @@ func NewSchema(in interface{}, context *compiler.Context) (*Schema, error) { if v20 != nil { // repeated Any x.Enum = make([]*Any, 0) - a, ok := v20.([]interface{}) + a, ok := compiler.SequenceNodeForNode(v20) if ok { - for _, item := range a { + for _, item := range a.Content { y, err := NewAny(item, compiler.NewContext("enum", context)) if err != nil { errors = append(errors, err) @@ -5129,9 +4871,9 @@ func NewSchema(in interface{}, context *compiler.Context) (*Schema, error) { if v24 != nil { // repeated Schema x.AllOf = make([]*Schema, 0) - a, ok := v24.([]interface{}) + a, ok := compiler.SequenceNodeForNode(v24) if ok { - for _, item := range a { + for _, item := range a.Content { y, err := NewSchema(item, compiler.NewContext("allOf", context)) if err != nil { errors = append(errors, err) @@ -5152,18 +4894,18 @@ func NewSchema(in interface{}, context *compiler.Context) (*Schema, error) { // string discriminator = 26; v26 := compiler.MapValueForKey(m, "discriminator") if v26 != nil { - x.Discriminator, ok = v26.(string) + x.Discriminator, ok = compiler.StringForScalarNode(v26) if !ok { - message := fmt.Sprintf("has unexpected value for discriminator: %+v (%T)", v26, v26) + message := fmt.Sprintf("has unexpected value for discriminator: %s", compiler.Display(v26)) errors = append(errors, compiler.NewError(context, message)) } } // bool read_only = 27; v27 := compiler.MapValueForKey(m, "readOnly") if v27 != nil { - x.ReadOnly, ok = v27.(bool) + x.ReadOnly, ok = compiler.BoolForScalarNode(v27) if !ok { - 
message := fmt.Sprintf("has unexpected value for readOnly: %+v (%T)", v27, v27) + message := fmt.Sprintf("has unexpected value for readOnly: %s", compiler.Display(v27)) errors = append(errors, compiler.NewError(context, message)) } } @@ -5197,20 +4939,20 @@ func NewSchema(in interface{}, context *compiler.Context) (*Schema, error) { // repeated NamedAny vendor_extension = 31; // MAP: Any ^x- x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) if ok { - v := item.Value + v := m.Content[i+1] if strings.HasPrefix(k, "x-") { pair := &NamedAny{} pair.Name = k result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + handled, resultFromExt, err := compiler.CallExtension(context, v, k) if handled { if err != nil { errors = append(errors, err) } else { - bytes, _ := yaml.Marshal(v) + bytes := compiler.Marshal(v) result.Yaml = string(bytes) result.Value = resultFromExt pair.Value = result @@ -5230,7 +4972,7 @@ func NewSchema(in interface{}, context *compiler.Context) (*Schema, error) { } // NewSchemaItem creates an object of type SchemaItem if possible, returning an error if not. -func NewSchemaItem(in interface{}, context *compiler.Context) (*SchemaItem, error) { +func NewSchemaItem(in *yaml.Node, context *compiler.Context) (*SchemaItem, error) { errors := make([]error, 0) x := &SchemaItem{} matched := false @@ -5270,7 +5012,7 @@ func NewSchemaItem(in interface{}, context *compiler.Context) (*SchemaItem, erro } // NewSecurityDefinitions creates an object of type SecurityDefinitions if possible, returning an error if not. -func NewSecurityDefinitions(in interface{}, context *compiler.Context) (*SecurityDefinitions, error) { +func NewSecurityDefinitions(in *yaml.Node, context *compiler.Context) (*SecurityDefinitions, error) { errors := make([]error, 0) x := &SecurityDefinitions{} m, ok := compiler.UnpackMap(in) @@ -5281,10 +5023,10 @@ func NewSecurityDefinitions(in interface{}, context *compiler.Context) (*Securit // repeated NamedSecurityDefinitionsItem additional_properties = 1; // MAP: SecurityDefinitionsItem x.AdditionalProperties = make([]*NamedSecurityDefinitionsItem, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) if ok { - v := item.Value + v := m.Content[i+1] pair := &NamedSecurityDefinitionsItem{} pair.Name = k var err error @@ -5300,7 +5042,7 @@ func NewSecurityDefinitions(in interface{}, context *compiler.Context) (*Securit } // NewSecurityDefinitionsItem creates an object of type SecurityDefinitionsItem if possible, returning an error if not. -func NewSecurityDefinitionsItem(in interface{}, context *compiler.Context) (*SecurityDefinitionsItem, error) { +func NewSecurityDefinitionsItem(in *yaml.Node, context *compiler.Context) (*SecurityDefinitionsItem, error) { errors := make([]error, 0) x := &SecurityDefinitionsItem{} matched := false @@ -5396,7 +5138,7 @@ func NewSecurityDefinitionsItem(in interface{}, context *compiler.Context) (*Sec } // NewSecurityRequirement creates an object of type SecurityRequirement if possible, returning an error if not. 
-func NewSecurityRequirement(in interface{}, context *compiler.Context) (*SecurityRequirement, error) { +func NewSecurityRequirement(in *yaml.Node, context *compiler.Context) (*SecurityRequirement, error) { errors := make([]error, 0) x := &SecurityRequirement{} m, ok := compiler.UnpackMap(in) @@ -5407,10 +5149,10 @@ func NewSecurityRequirement(in interface{}, context *compiler.Context) (*Securit // repeated NamedStringArray additional_properties = 1; // MAP: StringArray x.AdditionalProperties = make([]*NamedStringArray, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) if ok { - v := item.Value + v := m.Content[i+1] pair := &NamedStringArray{} pair.Name = k var err error @@ -5426,24 +5168,19 @@ func NewSecurityRequirement(in interface{}, context *compiler.Context) (*Securit } // NewStringArray creates an object of type StringArray if possible, returning an error if not. -func NewStringArray(in interface{}, context *compiler.Context) (*StringArray, error) { +func NewStringArray(in *yaml.Node, context *compiler.Context) (*StringArray, error) { errors := make([]error, 0) x := &StringArray{} - a, ok := in.([]interface{}) - if !ok { - message := fmt.Sprintf("has unexpected value for StringArray: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - x.Value = make([]string, 0) - for _, s := range a { - x.Value = append(x.Value, s.(string)) - } + x.Value = make([]string, 0) + for _, node := range in.Content { + s, _ := compiler.StringForScalarNode(node) + x.Value = append(x.Value, s) } return x, compiler.NewErrorGroupOrNil(errors) } // NewTag creates an object of type Tag if possible, returning an error if not. 
-func NewTag(in interface{}, context *compiler.Context) (*Tag, error) { +func NewTag(in *yaml.Node, context *compiler.Context) (*Tag, error) { errors := make([]error, 0) x := &Tag{} m, ok := compiler.UnpackMap(in) @@ -5467,18 +5204,18 @@ func NewTag(in interface{}, context *compiler.Context) (*Tag, error) { // string name = 1; v1 := compiler.MapValueForKey(m, "name") if v1 != nil { - x.Name, ok = v1.(string) + x.Name, ok = compiler.StringForScalarNode(v1) if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) errors = append(errors, compiler.NewError(context, message)) } } // string description = 2; v2 := compiler.MapValueForKey(m, "description") if v2 != nil { - x.Description, ok = v2.(string) + x.Description, ok = compiler.StringForScalarNode(v2) if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v2, v2) + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v2)) errors = append(errors, compiler.NewError(context, message)) } } @@ -5494,20 +5231,20 @@ func NewTag(in interface{}, context *compiler.Context) (*Tag, error) { // repeated NamedAny vendor_extension = 4; // MAP: Any ^x- x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) if ok { - v := item.Value + v := m.Content[i+1] if strings.HasPrefix(k, "x-") { pair := &NamedAny{} pair.Name = k result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + handled, resultFromExt, err := compiler.CallExtension(context, v, k) if handled { if err != nil { errors = append(errors, err) } else { - bytes, _ := yaml.Marshal(v) + bytes := compiler.Marshal(v) result.Yaml = string(bytes) result.Value = resultFromExt pair.Value = result @@ -5527,17 +5264,19 @@ func NewTag(in interface{}, context *compiler.Context) (*Tag, error) { } // NewTypeItem creates an object of type TypeItem if possible, returning an error if not. -func NewTypeItem(in interface{}, context *compiler.Context) (*TypeItem, error) { +func NewTypeItem(in *yaml.Node, context *compiler.Context) (*TypeItem, error) { errors := make([]error, 0) x := &TypeItem{} - switch in := in.(type) { - case string: + v1 := in + switch v1.Kind { + case yaml.ScalarNode: x.Value = make([]string, 0) - x.Value = append(x.Value, in) - case []interface{}: + x.Value = append(x.Value, v1.Value) + case yaml.SequenceNode: x.Value = make([]string, 0) - for _, v := range in { - value, ok := v.(string) + for _, v := range v1.Content { + value := v.Value + ok := v.Kind == yaml.ScalarNode if ok { x.Value = append(x.Value, value) } else { @@ -5553,7 +5292,7 @@ func NewTypeItem(in interface{}, context *compiler.Context) (*TypeItem, error) { } // NewVendorExtension creates an object of type VendorExtension if possible, returning an error if not. 
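NewStringArray and NewTypeItem above now branch on the node Kind instead of asserting against string or []interface{}. A minimal sketch, assuming gopkg.in/yaml.v3, of accepting either a single scalar or a sequence of scalars (the helper name is illustrative, not part of the patch):

// typeitem_sketch.go - illustrative only.
package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v3"
)

// stringsForNode accepts either a scalar node or a sequence of scalar nodes.
func stringsForNode(n *yaml.Node) ([]string, bool) {
	switch n.Kind {
	case yaml.ScalarNode:
		return []string{n.Value}, true
	case yaml.SequenceNode:
		out := make([]string, 0, len(n.Content))
		for _, item := range n.Content {
			if item.Kind != yaml.ScalarNode {
				return nil, false
			}
			out = append(out, item.Value)
		}
		return out, true
	default:
		return nil, false
	}
}

func main() {
	for _, src := range []string{"type: string", "type: [string, \"null\"]"} {
		var doc yaml.Node
		if err := yaml.Unmarshal([]byte(src), &doc); err != nil {
			panic(err)
		}
		values, ok := stringsForNode(doc.Content[0].Content[1])
		fmt.Println(values, ok) // [string] true, then [string null] true
	}
}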
-func NewVendorExtension(in interface{}, context *compiler.Context) (*VendorExtension, error) { +func NewVendorExtension(in *yaml.Node, context *compiler.Context) (*VendorExtension, error) { errors := make([]error, 0) x := &VendorExtension{} m, ok := compiler.UnpackMap(in) @@ -5564,19 +5303,19 @@ func NewVendorExtension(in interface{}, context *compiler.Context) (*VendorExten // repeated NamedAny additional_properties = 1; // MAP: Any x.AdditionalProperties = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) if ok { - v := item.Value + v := m.Content[i+1] pair := &NamedAny{} pair.Name = k result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + handled, resultFromExt, err := compiler.CallExtension(context, v, k) if handled { if err != nil { errors = append(errors, err) } else { - bytes, _ := yaml.Marshal(v) + bytes := compiler.Marshal(v) result.Yaml = string(bytes) result.Value = resultFromExt pair.Value = result @@ -5595,7 +5334,7 @@ func NewVendorExtension(in interface{}, context *compiler.Context) (*VendorExten } // NewXml creates an object of type Xml if possible, returning an error if not. -func NewXml(in interface{}, context *compiler.Context) (*Xml, error) { +func NewXml(in *yaml.Node, context *compiler.Context) (*Xml, error) { errors := make([]error, 0) x := &Xml{} m, ok := compiler.UnpackMap(in) @@ -5613,65 +5352,65 @@ func NewXml(in interface{}, context *compiler.Context) (*Xml, error) { // string name = 1; v1 := compiler.MapValueForKey(m, "name") if v1 != nil { - x.Name, ok = v1.(string) + x.Name, ok = compiler.StringForScalarNode(v1) if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) errors = append(errors, compiler.NewError(context, message)) } } // string namespace = 2; v2 := compiler.MapValueForKey(m, "namespace") if v2 != nil { - x.Namespace, ok = v2.(string) + x.Namespace, ok = compiler.StringForScalarNode(v2) if !ok { - message := fmt.Sprintf("has unexpected value for namespace: %+v (%T)", v2, v2) + message := fmt.Sprintf("has unexpected value for namespace: %s", compiler.Display(v2)) errors = append(errors, compiler.NewError(context, message)) } } // string prefix = 3; v3 := compiler.MapValueForKey(m, "prefix") if v3 != nil { - x.Prefix, ok = v3.(string) + x.Prefix, ok = compiler.StringForScalarNode(v3) if !ok { - message := fmt.Sprintf("has unexpected value for prefix: %+v (%T)", v3, v3) + message := fmt.Sprintf("has unexpected value for prefix: %s", compiler.Display(v3)) errors = append(errors, compiler.NewError(context, message)) } } // bool attribute = 4; v4 := compiler.MapValueForKey(m, "attribute") if v4 != nil { - x.Attribute, ok = v4.(bool) + x.Attribute, ok = compiler.BoolForScalarNode(v4) if !ok { - message := fmt.Sprintf("has unexpected value for attribute: %+v (%T)", v4, v4) + message := fmt.Sprintf("has unexpected value for attribute: %s", compiler.Display(v4)) errors = append(errors, compiler.NewError(context, message)) } } // bool wrapped = 5; v5 := compiler.MapValueForKey(m, "wrapped") if v5 != nil { - x.Wrapped, ok = v5.(bool) + x.Wrapped, ok = compiler.BoolForScalarNode(v5) if !ok { - message := fmt.Sprintf("has unexpected value for wrapped: %+v (%T)", v5, v5) + message := fmt.Sprintf("has unexpected value for wrapped: %s", compiler.Display(v5)) errors = append(errors, 
compiler.NewError(context, message)) } } // repeated NamedAny vendor_extension = 6; // MAP: Any ^x- x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) if ok { - v := item.Value + v := m.Content[i+1] if strings.HasPrefix(k, "x-") { pair := &NamedAny{} pair.Name = k result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + handled, resultFromExt, err := compiler.CallExtension(context, v, k) if handled { if err != nil { errors = append(errors, err) } else { - bytes, _ := yaml.Marshal(v) + bytes := compiler.Marshal(v) result.Yaml = string(bytes) result.Value = resultFromExt pair.Value = result @@ -5691,7 +5430,7 @@ func NewXml(in interface{}, context *compiler.Context) (*Xml, error) { } // ResolveReferences resolves references found inside AdditionalPropertiesItem objects. -func (m *AdditionalPropertiesItem) ResolveReferences(root string) (interface{}, error) { +func (m *AdditionalPropertiesItem) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) { p, ok := m.Oneof.(*AdditionalPropertiesItem_Schema) @@ -5706,13 +5445,13 @@ func (m *AdditionalPropertiesItem) ResolveReferences(root string) (interface{}, } // ResolveReferences resolves references found inside Any objects. -func (m *Any) ResolveReferences(root string) (interface{}, error) { +func (m *Any) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) return nil, compiler.NewErrorGroupOrNil(errors) } // ResolveReferences resolves references found inside ApiKeySecurity objects. -func (m *ApiKeySecurity) ResolveReferences(root string) (interface{}, error) { +func (m *ApiKeySecurity) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) for _, item := range m.VendorExtension { if item != nil { @@ -5726,7 +5465,7 @@ func (m *ApiKeySecurity) ResolveReferences(root string) (interface{}, error) { } // ResolveReferences resolves references found inside BasicAuthenticationSecurity objects. -func (m *BasicAuthenticationSecurity) ResolveReferences(root string) (interface{}, error) { +func (m *BasicAuthenticationSecurity) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) for _, item := range m.VendorExtension { if item != nil { @@ -5740,7 +5479,7 @@ func (m *BasicAuthenticationSecurity) ResolveReferences(root string) (interface{ } // ResolveReferences resolves references found inside BodyParameter objects. -func (m *BodyParameter) ResolveReferences(root string) (interface{}, error) { +func (m *BodyParameter) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) if m.Schema != nil { _, err := m.Schema.ResolveReferences(root) @@ -5760,7 +5499,7 @@ func (m *BodyParameter) ResolveReferences(root string) (interface{}, error) { } // ResolveReferences resolves references found inside Contact objects. -func (m *Contact) ResolveReferences(root string) (interface{}, error) { +func (m *Contact) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) for _, item := range m.VendorExtension { if item != nil { @@ -5774,7 +5513,7 @@ func (m *Contact) ResolveReferences(root string) (interface{}, error) { } // ResolveReferences resolves references found inside Default objects. 
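The extension-handling hunks also replace yaml.Marshal(v) with compiler.Marshal(v) when storing the raw YAML of an extension value; with yaml.v3 a *yaml.Node can itself be marshaled back to YAML text. A minimal sketch of that round trip (the real compiler.Marshal may differ in error handling and formatting):

// marshal_sketch.go - illustrative only.
package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v3"
)

func main() {
	var doc yaml.Node
	if err := yaml.Unmarshal([]byte("x-audit: {enabled: true}"), &doc); err != nil {
		panic(err)
	}
	value := doc.Content[0].Content[1] // the mapping node behind the x-audit key
	out, err := yaml.Marshal(value)    // yaml.v3 serializes *yaml.Node directly
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // enabled: true
}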
-func (m *Default) ResolveReferences(root string) (interface{}, error) { +func (m *Default) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) for _, item := range m.AdditionalProperties { if item != nil { @@ -5788,7 +5527,7 @@ func (m *Default) ResolveReferences(root string) (interface{}, error) { } // ResolveReferences resolves references found inside Definitions objects. -func (m *Definitions) ResolveReferences(root string) (interface{}, error) { +func (m *Definitions) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) for _, item := range m.AdditionalProperties { if item != nil { @@ -5802,7 +5541,7 @@ func (m *Definitions) ResolveReferences(root string) (interface{}, error) { } // ResolveReferences resolves references found inside Document objects. -func (m *Document) ResolveReferences(root string) (interface{}, error) { +func (m *Document) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) if m.Info != nil { _, err := m.Info.ResolveReferences(root) @@ -5874,7 +5613,7 @@ func (m *Document) ResolveReferences(root string) (interface{}, error) { } // ResolveReferences resolves references found inside Examples objects. -func (m *Examples) ResolveReferences(root string) (interface{}, error) { +func (m *Examples) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) for _, item := range m.AdditionalProperties { if item != nil { @@ -5888,7 +5627,7 @@ func (m *Examples) ResolveReferences(root string) (interface{}, error) { } // ResolveReferences resolves references found inside ExternalDocs objects. -func (m *ExternalDocs) ResolveReferences(root string) (interface{}, error) { +func (m *ExternalDocs) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) for _, item := range m.VendorExtension { if item != nil { @@ -5902,7 +5641,7 @@ func (m *ExternalDocs) ResolveReferences(root string) (interface{}, error) { } // ResolveReferences resolves references found inside FileSchema objects. -func (m *FileSchema) ResolveReferences(root string) (interface{}, error) { +func (m *FileSchema) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) if m.Default != nil { _, err := m.Default.ResolveReferences(root) @@ -5934,7 +5673,7 @@ func (m *FileSchema) ResolveReferences(root string) (interface{}, error) { } // ResolveReferences resolves references found inside FormDataParameterSubSchema objects. -func (m *FormDataParameterSubSchema) ResolveReferences(root string) (interface{}, error) { +func (m *FormDataParameterSubSchema) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) if m.Items != nil { _, err := m.Items.ResolveReferences(root) @@ -5968,7 +5707,7 @@ func (m *FormDataParameterSubSchema) ResolveReferences(root string) (interface{} } // ResolveReferences resolves references found inside Header objects. -func (m *Header) ResolveReferences(root string) (interface{}, error) { +func (m *Header) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) if m.Items != nil { _, err := m.Items.ResolveReferences(root) @@ -6002,7 +5741,7 @@ func (m *Header) ResolveReferences(root string) (interface{}, error) { } // ResolveReferences resolves references found inside HeaderParameterSubSchema objects. 
-func (m *HeaderParameterSubSchema) ResolveReferences(root string) (interface{}, error) { +func (m *HeaderParameterSubSchema) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) if m.Items != nil { _, err := m.Items.ResolveReferences(root) @@ -6036,7 +5775,7 @@ func (m *HeaderParameterSubSchema) ResolveReferences(root string) (interface{}, } // ResolveReferences resolves references found inside Headers objects. -func (m *Headers) ResolveReferences(root string) (interface{}, error) { +func (m *Headers) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) for _, item := range m.AdditionalProperties { if item != nil { @@ -6050,7 +5789,7 @@ func (m *Headers) ResolveReferences(root string) (interface{}, error) { } // ResolveReferences resolves references found inside Info objects. -func (m *Info) ResolveReferences(root string) (interface{}, error) { +func (m *Info) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) if m.Contact != nil { _, err := m.Contact.ResolveReferences(root) @@ -6076,7 +5815,7 @@ func (m *Info) ResolveReferences(root string) (interface{}, error) { } // ResolveReferences resolves references found inside ItemsItem objects. -func (m *ItemsItem) ResolveReferences(root string) (interface{}, error) { +func (m *ItemsItem) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) for _, item := range m.Schema { if item != nil { @@ -6090,7 +5829,7 @@ func (m *ItemsItem) ResolveReferences(root string) (interface{}, error) { } // ResolveReferences resolves references found inside JsonReference objects. -func (m *JsonReference) ResolveReferences(root string) (interface{}, error) { +func (m *JsonReference) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) if m.XRef != "" { info, err := compiler.ReadInfoForRef(root, m.XRef) @@ -6110,7 +5849,7 @@ func (m *JsonReference) ResolveReferences(root string) (interface{}, error) { } // ResolveReferences resolves references found inside License objects. -func (m *License) ResolveReferences(root string) (interface{}, error) { +func (m *License) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) for _, item := range m.VendorExtension { if item != nil { @@ -6124,7 +5863,7 @@ func (m *License) ResolveReferences(root string) (interface{}, error) { } // ResolveReferences resolves references found inside NamedAny objects. -func (m *NamedAny) ResolveReferences(root string) (interface{}, error) { +func (m *NamedAny) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) if m.Value != nil { _, err := m.Value.ResolveReferences(root) @@ -6136,7 +5875,7 @@ func (m *NamedAny) ResolveReferences(root string) (interface{}, error) { } // ResolveReferences resolves references found inside NamedHeader objects. -func (m *NamedHeader) ResolveReferences(root string) (interface{}, error) { +func (m *NamedHeader) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) if m.Value != nil { _, err := m.Value.ResolveReferences(root) @@ -6148,7 +5887,7 @@ func (m *NamedHeader) ResolveReferences(root string) (interface{}, error) { } // ResolveReferences resolves references found inside NamedParameter objects. 
-func (m *NamedParameter) ResolveReferences(root string) (interface{}, error) { +func (m *NamedParameter) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) if m.Value != nil { _, err := m.Value.ResolveReferences(root) @@ -6160,7 +5899,7 @@ func (m *NamedParameter) ResolveReferences(root string) (interface{}, error) { } // ResolveReferences resolves references found inside NamedPathItem objects. -func (m *NamedPathItem) ResolveReferences(root string) (interface{}, error) { +func (m *NamedPathItem) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) if m.Value != nil { _, err := m.Value.ResolveReferences(root) @@ -6172,7 +5911,7 @@ func (m *NamedPathItem) ResolveReferences(root string) (interface{}, error) { } // ResolveReferences resolves references found inside NamedResponse objects. -func (m *NamedResponse) ResolveReferences(root string) (interface{}, error) { +func (m *NamedResponse) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) if m.Value != nil { _, err := m.Value.ResolveReferences(root) @@ -6184,7 +5923,7 @@ func (m *NamedResponse) ResolveReferences(root string) (interface{}, error) { } // ResolveReferences resolves references found inside NamedResponseValue objects. -func (m *NamedResponseValue) ResolveReferences(root string) (interface{}, error) { +func (m *NamedResponseValue) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) if m.Value != nil { _, err := m.Value.ResolveReferences(root) @@ -6196,7 +5935,7 @@ func (m *NamedResponseValue) ResolveReferences(root string) (interface{}, error) } // ResolveReferences resolves references found inside NamedSchema objects. -func (m *NamedSchema) ResolveReferences(root string) (interface{}, error) { +func (m *NamedSchema) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) if m.Value != nil { _, err := m.Value.ResolveReferences(root) @@ -6208,7 +5947,7 @@ func (m *NamedSchema) ResolveReferences(root string) (interface{}, error) { } // ResolveReferences resolves references found inside NamedSecurityDefinitionsItem objects. -func (m *NamedSecurityDefinitionsItem) ResolveReferences(root string) (interface{}, error) { +func (m *NamedSecurityDefinitionsItem) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) if m.Value != nil { _, err := m.Value.ResolveReferences(root) @@ -6220,13 +5959,13 @@ func (m *NamedSecurityDefinitionsItem) ResolveReferences(root string) (interface } // ResolveReferences resolves references found inside NamedString objects. -func (m *NamedString) ResolveReferences(root string) (interface{}, error) { +func (m *NamedString) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) return nil, compiler.NewErrorGroupOrNil(errors) } // ResolveReferences resolves references found inside NamedStringArray objects. -func (m *NamedStringArray) ResolveReferences(root string) (interface{}, error) { +func (m *NamedStringArray) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) if m.Value != nil { _, err := m.Value.ResolveReferences(root) @@ -6238,7 +5977,7 @@ func (m *NamedStringArray) ResolveReferences(root string) (interface{}, error) { } // ResolveReferences resolves references found inside NonBodyParameter objects. 
-func (m *NonBodyParameter) ResolveReferences(root string) (interface{}, error) { +func (m *NonBodyParameter) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) { p, ok := m.Oneof.(*NonBodyParameter_HeaderParameterSubSchema) @@ -6280,7 +6019,7 @@ func (m *NonBodyParameter) ResolveReferences(root string) (interface{}, error) { } // ResolveReferences resolves references found inside Oauth2AccessCodeSecurity objects. -func (m *Oauth2AccessCodeSecurity) ResolveReferences(root string) (interface{}, error) { +func (m *Oauth2AccessCodeSecurity) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) if m.Scopes != nil { _, err := m.Scopes.ResolveReferences(root) @@ -6300,7 +6039,7 @@ func (m *Oauth2AccessCodeSecurity) ResolveReferences(root string) (interface{}, } // ResolveReferences resolves references found inside Oauth2ApplicationSecurity objects. -func (m *Oauth2ApplicationSecurity) ResolveReferences(root string) (interface{}, error) { +func (m *Oauth2ApplicationSecurity) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) if m.Scopes != nil { _, err := m.Scopes.ResolveReferences(root) @@ -6320,7 +6059,7 @@ func (m *Oauth2ApplicationSecurity) ResolveReferences(root string) (interface{}, } // ResolveReferences resolves references found inside Oauth2ImplicitSecurity objects. -func (m *Oauth2ImplicitSecurity) ResolveReferences(root string) (interface{}, error) { +func (m *Oauth2ImplicitSecurity) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) if m.Scopes != nil { _, err := m.Scopes.ResolveReferences(root) @@ -6340,7 +6079,7 @@ func (m *Oauth2ImplicitSecurity) ResolveReferences(root string) (interface{}, er } // ResolveReferences resolves references found inside Oauth2PasswordSecurity objects. -func (m *Oauth2PasswordSecurity) ResolveReferences(root string) (interface{}, error) { +func (m *Oauth2PasswordSecurity) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) if m.Scopes != nil { _, err := m.Scopes.ResolveReferences(root) @@ -6360,7 +6099,7 @@ func (m *Oauth2PasswordSecurity) ResolveReferences(root string) (interface{}, er } // ResolveReferences resolves references found inside Oauth2Scopes objects. -func (m *Oauth2Scopes) ResolveReferences(root string) (interface{}, error) { +func (m *Oauth2Scopes) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) for _, item := range m.AdditionalProperties { if item != nil { @@ -6374,7 +6113,7 @@ func (m *Oauth2Scopes) ResolveReferences(root string) (interface{}, error) { } // ResolveReferences resolves references found inside Operation objects. -func (m *Operation) ResolveReferences(root string) (interface{}, error) { +func (m *Operation) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) if m.ExternalDocs != nil { _, err := m.ExternalDocs.ResolveReferences(root) @@ -6416,7 +6155,7 @@ func (m *Operation) ResolveReferences(root string) (interface{}, error) { } // ResolveReferences resolves references found inside Parameter objects. -func (m *Parameter) ResolveReferences(root string) (interface{}, error) { +func (m *Parameter) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) { p, ok := m.Oneof.(*Parameter_BodyParameter) @@ -6440,7 +6179,7 @@ func (m *Parameter) ResolveReferences(root string) (interface{}, error) { } // ResolveReferences resolves references found inside ParameterDefinitions objects. 
-func (m *ParameterDefinitions) ResolveReferences(root string) (interface{}, error) { +func (m *ParameterDefinitions) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) for _, item := range m.AdditionalProperties { if item != nil { @@ -6454,7 +6193,7 @@ func (m *ParameterDefinitions) ResolveReferences(root string) (interface{}, erro } // ResolveReferences resolves references found inside ParametersItem objects. -func (m *ParametersItem) ResolveReferences(root string) (interface{}, error) { +func (m *ParametersItem) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) { p, ok := m.Oneof.(*ParametersItem_Parameter) @@ -6486,7 +6225,7 @@ func (m *ParametersItem) ResolveReferences(root string) (interface{}, error) { } // ResolveReferences resolves references found inside PathItem objects. -func (m *PathItem) ResolveReferences(root string) (interface{}, error) { +func (m *PathItem) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) if m.XRef != "" { info, err := compiler.ReadInfoForRef(root, m.XRef) @@ -6564,7 +6303,7 @@ func (m *PathItem) ResolveReferences(root string) (interface{}, error) { } // ResolveReferences resolves references found inside PathParameterSubSchema objects. -func (m *PathParameterSubSchema) ResolveReferences(root string) (interface{}, error) { +func (m *PathParameterSubSchema) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) if m.Items != nil { _, err := m.Items.ResolveReferences(root) @@ -6598,7 +6337,7 @@ func (m *PathParameterSubSchema) ResolveReferences(root string) (interface{}, er } // ResolveReferences resolves references found inside Paths objects. -func (m *Paths) ResolveReferences(root string) (interface{}, error) { +func (m *Paths) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) for _, item := range m.VendorExtension { if item != nil { @@ -6620,7 +6359,7 @@ func (m *Paths) ResolveReferences(root string) (interface{}, error) { } // ResolveReferences resolves references found inside PrimitivesItems objects. -func (m *PrimitivesItems) ResolveReferences(root string) (interface{}, error) { +func (m *PrimitivesItems) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) if m.Items != nil { _, err := m.Items.ResolveReferences(root) @@ -6654,7 +6393,7 @@ func (m *PrimitivesItems) ResolveReferences(root string) (interface{}, error) { } // ResolveReferences resolves references found inside Properties objects. -func (m *Properties) ResolveReferences(root string) (interface{}, error) { +func (m *Properties) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) for _, item := range m.AdditionalProperties { if item != nil { @@ -6668,7 +6407,7 @@ func (m *Properties) ResolveReferences(root string) (interface{}, error) { } // ResolveReferences resolves references found inside QueryParameterSubSchema objects. -func (m *QueryParameterSubSchema) ResolveReferences(root string) (interface{}, error) { +func (m *QueryParameterSubSchema) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) if m.Items != nil { _, err := m.Items.ResolveReferences(root) @@ -6702,7 +6441,7 @@ func (m *QueryParameterSubSchema) ResolveReferences(root string) (interface{}, e } // ResolveReferences resolves references found inside Response objects. 
-func (m *Response) ResolveReferences(root string) (interface{}, error) { +func (m *Response) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) if m.Schema != nil { _, err := m.Schema.ResolveReferences(root) @@ -6734,7 +6473,7 @@ func (m *Response) ResolveReferences(root string) (interface{}, error) { } // ResolveReferences resolves references found inside ResponseDefinitions objects. -func (m *ResponseDefinitions) ResolveReferences(root string) (interface{}, error) { +func (m *ResponseDefinitions) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) for _, item := range m.AdditionalProperties { if item != nil { @@ -6748,7 +6487,7 @@ func (m *ResponseDefinitions) ResolveReferences(root string) (interface{}, error } // ResolveReferences resolves references found inside ResponseValue objects. -func (m *ResponseValue) ResolveReferences(root string) (interface{}, error) { +func (m *ResponseValue) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) { p, ok := m.Oneof.(*ResponseValue_Response) @@ -6780,7 +6519,7 @@ func (m *ResponseValue) ResolveReferences(root string) (interface{}, error) { } // ResolveReferences resolves references found inside Responses objects. -func (m *Responses) ResolveReferences(root string) (interface{}, error) { +func (m *Responses) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) for _, item := range m.ResponseCode { if item != nil { @@ -6802,7 +6541,7 @@ func (m *Responses) ResolveReferences(root string) (interface{}, error) { } // ResolveReferences resolves references found inside Schema objects. -func (m *Schema) ResolveReferences(root string) (interface{}, error) { +func (m *Schema) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) if m.XRef != "" { info, err := compiler.ReadInfoForRef(root, m.XRef) @@ -6894,7 +6633,7 @@ func (m *Schema) ResolveReferences(root string) (interface{}, error) { } // ResolveReferences resolves references found inside SchemaItem objects. -func (m *SchemaItem) ResolveReferences(root string) (interface{}, error) { +func (m *SchemaItem) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) { p, ok := m.Oneof.(*SchemaItem_Schema) @@ -6918,7 +6657,7 @@ func (m *SchemaItem) ResolveReferences(root string) (interface{}, error) { } // ResolveReferences resolves references found inside SecurityDefinitions objects. -func (m *SecurityDefinitions) ResolveReferences(root string) (interface{}, error) { +func (m *SecurityDefinitions) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) for _, item := range m.AdditionalProperties { if item != nil { @@ -6932,7 +6671,7 @@ func (m *SecurityDefinitions) ResolveReferences(root string) (interface{}, error } // ResolveReferences resolves references found inside SecurityDefinitionsItem objects. -func (m *SecurityDefinitionsItem) ResolveReferences(root string) (interface{}, error) { +func (m *SecurityDefinitionsItem) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) { p, ok := m.Oneof.(*SecurityDefinitionsItem_BasicAuthenticationSecurity) @@ -6992,7 +6731,7 @@ func (m *SecurityDefinitionsItem) ResolveReferences(root string) (interface{}, e } // ResolveReferences resolves references found inside SecurityRequirement objects. 
-func (m *SecurityRequirement) ResolveReferences(root string) (interface{}, error) { +func (m *SecurityRequirement) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) for _, item := range m.AdditionalProperties { if item != nil { @@ -7006,13 +6745,13 @@ func (m *SecurityRequirement) ResolveReferences(root string) (interface{}, error } // ResolveReferences resolves references found inside StringArray objects. -func (m *StringArray) ResolveReferences(root string) (interface{}, error) { +func (m *StringArray) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) return nil, compiler.NewErrorGroupOrNil(errors) } // ResolveReferences resolves references found inside Tag objects. -func (m *Tag) ResolveReferences(root string) (interface{}, error) { +func (m *Tag) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) if m.ExternalDocs != nil { _, err := m.ExternalDocs.ResolveReferences(root) @@ -7032,13 +6771,13 @@ func (m *Tag) ResolveReferences(root string) (interface{}, error) { } // ResolveReferences resolves references found inside TypeItem objects. -func (m *TypeItem) ResolveReferences(root string) (interface{}, error) { +func (m *TypeItem) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) return nil, compiler.NewErrorGroupOrNil(errors) } // ResolveReferences resolves references found inside VendorExtension objects. -func (m *VendorExtension) ResolveReferences(root string) (interface{}, error) { +func (m *VendorExtension) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) for _, item := range m.AdditionalProperties { if item != nil { @@ -7052,7 +6791,7 @@ func (m *VendorExtension) ResolveReferences(root string) (interface{}, error) { } // ResolveReferences resolves references found inside Xml objects. -func (m *Xml) ResolveReferences(root string) (interface{}, error) { +func (m *Xml) ResolveReferences(root string) (*yaml.Node, error) { errors := make([]error, 0) for _, item := range m.VendorExtension { if item != nil { @@ -7066,7 +6805,7 @@ func (m *Xml) ResolveReferences(root string) (interface{}, error) { } // ToRawInfo returns a description of AdditionalPropertiesItem suitable for JSON or YAML export. -func (m *AdditionalPropertiesItem) ToRawInfo() interface{} { +func (m *AdditionalPropertiesItem) ToRawInfo() *yaml.Node { // ONE OF WRAPPER // AdditionalPropertiesItem // {Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} @@ -7076,792 +6815,885 @@ func (m *AdditionalPropertiesItem) ToRawInfo() interface{} { } // {Name:boolean Type:bool StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if v1, ok := m.GetOneof().(*AdditionalPropertiesItem_Boolean); ok { - return v1.Boolean + return compiler.NewScalarNodeForBool(v1.Boolean) } return nil } // ToRawInfo returns a description of Any suitable for JSON or YAML export. 
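The hunks above change every ResolveReferences signature from (interface{}, error) to (*yaml.Node, error) while keeping the body's error-collection shape: child errors are gathered into a slice and folded into a single error (or nil) by compiler.NewErrorGroupOrNil. A rough, self-contained sketch of that shape; errorGroupOrNil is a hypothetical stand-in written for illustration, not the gnostic compiler API:

package main

import (
	"errors"
	"fmt"
)

// errorGroupOrNil mimics what the vendored compiler.NewErrorGroupOrNil is
// used for above: return nil when nothing failed, otherwise one combined error.
func errorGroupOrNil(errs []error) error {
	if len(errs) == 0 {
		return nil
	}
	return fmt.Errorf("%d error(s) while resolving references: %v", len(errs), errs)
}

func main() {
	collected := make([]error, 0)
	fmt.Println(errorGroupOrNil(collected)) // <nil>

	collected = append(collected, errors.New("unresolvable $ref"))
	fmt.Println(errorGroupOrNil(collected)) // 1 error(s) while resolving references: [...]
}

Callers that only inspected the error keep working across the signature change; only code that consumed the previously untyped result has to switch to *yaml.Node.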
-func (m *Any) ToRawInfo() interface{} { +func (m *Any) ToRawInfo() *yaml.Node { var err error - var info1 []yaml.MapSlice - err = yaml.Unmarshal([]byte(m.Yaml), &info1) + var node yaml.Node + err = yaml.Unmarshal([]byte(m.Yaml), &node) if err == nil { - return info1 - } - var info2 yaml.MapSlice - err = yaml.Unmarshal([]byte(m.Yaml), &info2) - if err == nil { - return info2 - } - var info3 interface{} - err = yaml.Unmarshal([]byte(m.Yaml), &info3) - if err == nil { - return info3 + if node.Kind == yaml.DocumentNode { + return node.Content[0] + } + return &node + } else { + return nil } return nil } // ToRawInfo returns a description of ApiKeySecurity suitable for JSON or YAML export. -func (m *ApiKeySecurity) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *ApiKeySecurity) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } // always include this required field. - info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) // always include this required field. - info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) // always include this required field. - info = append(info, yaml.MapItem{Key: "in", Value: m.In}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("in")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.In)) if m.Description != "" { - info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) } } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} return info } // ToRawInfo returns a description of BasicAuthenticationSecurity suitable for JSON or YAML export. -func (m *BasicAuthenticationSecurity) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *BasicAuthenticationSecurity) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } // always include this required field. 
- info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) if m.Description != "" { - info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) } } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} return info } // ToRawInfo returns a description of BodyParameter suitable for JSON or YAML export. -func (m *BodyParameter) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *BodyParameter) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } if m.Description != "" { - info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) } // always include this required field. - info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) // always include this required field. - info = append(info, yaml.MapItem{Key: "in", Value: m.In}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("in")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.In)) if m.Required != false { - info = append(info, yaml.MapItem{Key: "required", Value: m.Required}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("required")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Required)) } // always include this required field. - info = append(info, yaml.MapItem{Key: "schema", Value: m.Schema.ToRawInfo()}) - // &{Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + info.Content = append(info.Content, compiler.NewScalarNodeForString("schema")) + info.Content = append(info.Content, m.Schema.ToRawInfo()) if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) } } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} return info } // ToRawInfo returns a description of Contact suitable for JSON or YAML export. 
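The regenerated Any.ToRawInfo above now parses the stored YAML into a yaml.Node and unwraps the surrounding document node. A minimal, self-contained sketch of that behaviour, assuming gopkg.in/yaml.v3 (the module this generated code targets) and an example input invented for illustration:

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	var node yaml.Node
	if err := yaml.Unmarshal([]byte("name: petstore\nbasePath: /v2\n"), &node); err != nil {
		panic(err)
	}
	// yaml.v3 wraps the parsed content in a DocumentNode; the actual mapping,
	// sequence or scalar sits in Content[0], which is why Any.ToRawInfo
	// unwraps the document node before returning.
	root := &node
	if node.Kind == yaml.DocumentNode && len(node.Content) > 0 {
		root = node.Content[0]
	}
	fmt.Println(root.Kind == yaml.MappingNode) // true
}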
-func (m *Contact) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *Contact) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } if m.Name != "" { - info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) } if m.Url != "" { - info = append(info, yaml.MapItem{Key: "url", Value: m.Url}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("url")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Url)) } if m.Email != "" { - info = append(info, yaml.MapItem{Key: "email", Value: m.Email}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("email")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Email)) } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) } } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} return info } // ToRawInfo returns a description of Default suitable for JSON or YAML export. -func (m *Default) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *Default) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } if m.AdditionalProperties != nil { for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) } } - // &{Name:additionalProperties Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern: Implicit:false Description:} return info } // ToRawInfo returns a description of Definitions suitable for JSON or YAML export. -func (m *Definitions) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *Definitions) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } if m.AdditionalProperties != nil { for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) } } - // &{Name:additionalProperties Type:NamedSchema StringEnumValues:[] MapType:Schema Repeated:true Pattern: Implicit:true Description:} return info } // ToRawInfo returns a description of Document suitable for JSON or YAML export. -func (m *Document) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *Document) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } // always include this required field. - info = append(info, yaml.MapItem{Key: "swagger", Value: m.Swagger}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("swagger")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Swagger)) // always include this required field. 
- info = append(info, yaml.MapItem{Key: "info", Value: m.Info.ToRawInfo()}) - // &{Name:info Type:Info StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + info.Content = append(info.Content, compiler.NewScalarNodeForString("info")) + info.Content = append(info.Content, m.Info.ToRawInfo()) if m.Host != "" { - info = append(info, yaml.MapItem{Key: "host", Value: m.Host}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("host")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Host)) } if m.BasePath != "" { - info = append(info, yaml.MapItem{Key: "basePath", Value: m.BasePath}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("basePath")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.BasePath)) } if len(m.Schemes) != 0 { - info = append(info, yaml.MapItem{Key: "schemes", Value: m.Schemes}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("schemes")) + info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Schemes)) } if len(m.Consumes) != 0 { - info = append(info, yaml.MapItem{Key: "consumes", Value: m.Consumes}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("consumes")) + info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Consumes)) } if len(m.Produces) != 0 { - info = append(info, yaml.MapItem{Key: "produces", Value: m.Produces}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("produces")) + info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Produces)) } // always include this required field. - info = append(info, yaml.MapItem{Key: "paths", Value: m.Paths.ToRawInfo()}) - // &{Name:paths Type:Paths StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + info.Content = append(info.Content, compiler.NewScalarNodeForString("paths")) + info.Content = append(info.Content, m.Paths.ToRawInfo()) if m.Definitions != nil { - info = append(info, yaml.MapItem{Key: "definitions", Value: m.Definitions.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("definitions")) + info.Content = append(info.Content, m.Definitions.ToRawInfo()) } - // &{Name:definitions Type:Definitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Parameters != nil { - info = append(info, yaml.MapItem{Key: "parameters", Value: m.Parameters.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("parameters")) + info.Content = append(info.Content, m.Parameters.ToRawInfo()) } - // &{Name:parameters Type:ParameterDefinitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Responses != nil { - info = append(info, yaml.MapItem{Key: "responses", Value: m.Responses.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("responses")) + info.Content = append(info.Content, m.Responses.ToRawInfo()) } - // &{Name:responses Type:ResponseDefinitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if len(m.Security) != 0 { - items := make([]interface{}, 0) + items := compiler.NewSequenceNode() for _, item := range m.Security { - items = append(items, item.ToRawInfo()) + items.Content = append(items.Content, item.ToRawInfo()) } - info = append(info, yaml.MapItem{Key: "security", Value: items}) + info.Content = append(info.Content, 
compiler.NewScalarNodeForString("security")) + info.Content = append(info.Content, items) } - // &{Name:security Type:SecurityRequirement StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} if m.SecurityDefinitions != nil { - info = append(info, yaml.MapItem{Key: "securityDefinitions", Value: m.SecurityDefinitions.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("securityDefinitions")) + info.Content = append(info.Content, m.SecurityDefinitions.ToRawInfo()) } - // &{Name:securityDefinitions Type:SecurityDefinitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if len(m.Tags) != 0 { - items := make([]interface{}, 0) + items := compiler.NewSequenceNode() for _, item := range m.Tags { - items = append(items, item.ToRawInfo()) + items.Content = append(items.Content, item.ToRawInfo()) } - info = append(info, yaml.MapItem{Key: "tags", Value: items}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("tags")) + info.Content = append(info.Content, items) } - // &{Name:tags Type:Tag StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} if m.ExternalDocs != nil { - info = append(info, yaml.MapItem{Key: "externalDocs", Value: m.ExternalDocs.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("externalDocs")) + info.Content = append(info.Content, m.ExternalDocs.ToRawInfo()) } - // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) } } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} return info } // ToRawInfo returns a description of Examples suitable for JSON or YAML export. -func (m *Examples) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *Examples) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } if m.AdditionalProperties != nil { for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) } } - // &{Name:additionalProperties Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern: Implicit:true Description:} return info } // ToRawInfo returns a description of ExternalDocs suitable for JSON or YAML export. -func (m *ExternalDocs) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *ExternalDocs) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } if m.Description != "" { - info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) } // always include this required field. 
- info = append(info, yaml.MapItem{Key: "url", Value: m.Url}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("url")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Url)) if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) } } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} return info } // ToRawInfo returns a description of FileSchema suitable for JSON or YAML export. -func (m *FileSchema) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *FileSchema) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } if m.Format != "" { - info = append(info, yaml.MapItem{Key: "format", Value: m.Format}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("format")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Format)) } if m.Title != "" { - info = append(info, yaml.MapItem{Key: "title", Value: m.Title}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("title")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Title)) } if m.Description != "" { - info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) } if m.Default != nil { - info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("default")) + info.Content = append(info.Content, m.Default.ToRawInfo()) } - // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if len(m.Required) != 0 { - info = append(info, yaml.MapItem{Key: "required", Value: m.Required}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("required")) + info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Required)) } // always include this required field. 
- info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) if m.ReadOnly != false { - info = append(info, yaml.MapItem{Key: "readOnly", Value: m.ReadOnly}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("readOnly")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ReadOnly)) } if m.ExternalDocs != nil { - info = append(info, yaml.MapItem{Key: "externalDocs", Value: m.ExternalDocs.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("externalDocs")) + info.Content = append(info.Content, m.ExternalDocs.ToRawInfo()) } - // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Example != nil { - info = append(info, yaml.MapItem{Key: "example", Value: m.Example.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("example")) + info.Content = append(info.Content, m.Example.ToRawInfo()) } - // &{Name:example Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) } } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} return info } // ToRawInfo returns a description of FormDataParameterSubSchema suitable for JSON or YAML export. 
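Throughout these ToRawInfo methods the yaml.MapSlice result is replaced by a *yaml.Node mapping whose Content slice holds keys and values as alternating entries, which is why every field now appends a key node followed by a value node. A minimal sketch of the same pattern, using a local scalar() helper as a stand-in for the vendored compiler.NewScalarNodeForString and example keys and values chosen for illustration:

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

// scalar is a tiny stand-in for compiler.NewScalarNodeForString.
func scalar(s string) *yaml.Node {
	return &yaml.Node{Kind: yaml.ScalarNode, Tag: "!!str", Value: s}
}

func main() {
	// A mapping node stores keys and values as alternating Content entries,
	// mirroring the paired appends in the regenerated ToRawInfo methods.
	info := &yaml.Node{Kind: yaml.MappingNode}
	info.Content = append(info.Content, scalar("host"), scalar("petstore.example.com"))
	info.Content = append(info.Content, scalar("basePath"), scalar("/v2"))

	out, err := yaml.Marshal(info)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// host: petstore.example.com
	// basePath: /v2
}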
-func (m *FormDataParameterSubSchema) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *FormDataParameterSubSchema) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } if m.Required != false { - info = append(info, yaml.MapItem{Key: "required", Value: m.Required}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("required")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Required)) } if m.In != "" { - info = append(info, yaml.MapItem{Key: "in", Value: m.In}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("in")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.In)) } if m.Description != "" { - info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) } if m.Name != "" { - info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) } if m.AllowEmptyValue != false { - info = append(info, yaml.MapItem{Key: "allowEmptyValue", Value: m.AllowEmptyValue}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("allowEmptyValue")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.AllowEmptyValue)) } if m.Type != "" { - info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) } if m.Format != "" { - info = append(info, yaml.MapItem{Key: "format", Value: m.Format}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("format")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Format)) } if m.Items != nil { - info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("items")) + info.Content = append(info.Content, m.Items.ToRawInfo()) } - // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.CollectionFormat != "" { - info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("collectionFormat")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.CollectionFormat)) } if m.Default != nil { - info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("default")) + info.Content = append(info.Content, m.Default.ToRawInfo()) } - // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Maximum != 0.0 { - info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum)) } if m.ExclusiveMaximum != false { - info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum")) + info.Content = append(info.Content, 
compiler.NewScalarNodeForBool(m.ExclusiveMaximum)) } if m.Minimum != 0.0 { - info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum)) } if m.ExclusiveMinimum != false { - info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum)) } if m.MaxLength != 0 { - info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("maxLength")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxLength)) } if m.MinLength != 0 { - info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("minLength")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinLength)) } if m.Pattern != "" { - info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("pattern")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Pattern)) } if m.MaxItems != 0 { - info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("maxItems")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxItems)) } if m.MinItems != 0 { - info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems)) } if m.UniqueItems != false { - info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems)) } if len(m.Enum) != 0 { - items := make([]interface{}, 0) + items := compiler.NewSequenceNode() for _, item := range m.Enum { - items = append(items, item.ToRawInfo()) + items.Content = append(items.Content, item.ToRawInfo()) } - info = append(info, yaml.MapItem{Key: "enum", Value: items}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("enum")) + info.Content = append(info.Content, items) } - // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} if m.MultipleOf != 0.0 { - info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("multipleOf")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.MultipleOf)) } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) } } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} return info } // ToRawInfo returns a description of Header suitable for JSON or YAML export. 
-func (m *Header) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *Header) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } // always include this required field. - info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) if m.Format != "" { - info = append(info, yaml.MapItem{Key: "format", Value: m.Format}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("format")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Format)) } if m.Items != nil { - info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("items")) + info.Content = append(info.Content, m.Items.ToRawInfo()) } - // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.CollectionFormat != "" { - info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("collectionFormat")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.CollectionFormat)) } if m.Default != nil { - info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("default")) + info.Content = append(info.Content, m.Default.ToRawInfo()) } - // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Maximum != 0.0 { - info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum)) } if m.ExclusiveMaximum != false { - info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum)) } if m.Minimum != 0.0 { - info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum)) } if m.ExclusiveMinimum != false { - info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum)) } if m.MaxLength != 0 { - info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("maxLength")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxLength)) } if m.MinLength != 0 { - info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("minLength")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinLength)) } if m.Pattern != "" { - info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("pattern")) + info.Content = 
append(info.Content, compiler.NewScalarNodeForString(m.Pattern)) } if m.MaxItems != 0 { - info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("maxItems")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxItems)) } if m.MinItems != 0 { - info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems)) } if m.UniqueItems != false { - info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems)) } if len(m.Enum) != 0 { - items := make([]interface{}, 0) + items := compiler.NewSequenceNode() for _, item := range m.Enum { - items = append(items, item.ToRawInfo()) + items.Content = append(items.Content, item.ToRawInfo()) } - info = append(info, yaml.MapItem{Key: "enum", Value: items}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("enum")) + info.Content = append(info.Content, items) } - // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} if m.MultipleOf != 0.0 { - info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("multipleOf")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.MultipleOf)) } if m.Description != "" { - info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) } } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} return info } // ToRawInfo returns a description of HeaderParameterSubSchema suitable for JSON or YAML export. 
-func (m *HeaderParameterSubSchema) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *HeaderParameterSubSchema) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } if m.Required != false { - info = append(info, yaml.MapItem{Key: "required", Value: m.Required}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("required")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Required)) } if m.In != "" { - info = append(info, yaml.MapItem{Key: "in", Value: m.In}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("in")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.In)) } if m.Description != "" { - info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) } if m.Name != "" { - info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) } if m.Type != "" { - info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) } if m.Format != "" { - info = append(info, yaml.MapItem{Key: "format", Value: m.Format}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("format")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Format)) } if m.Items != nil { - info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("items")) + info.Content = append(info.Content, m.Items.ToRawInfo()) } - // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.CollectionFormat != "" { - info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("collectionFormat")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.CollectionFormat)) } if m.Default != nil { - info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("default")) + info.Content = append(info.Content, m.Default.ToRawInfo()) } - // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Maximum != 0.0 { - info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum)) } if m.ExclusiveMaximum != false { - info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum)) } if m.Minimum != 0.0 { - info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum")) + info.Content = append(info.Content, 
compiler.NewScalarNodeForFloat(m.Minimum)) } if m.ExclusiveMinimum != false { - info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum)) } if m.MaxLength != 0 { - info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("maxLength")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxLength)) } if m.MinLength != 0 { - info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("minLength")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinLength)) } if m.Pattern != "" { - info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("pattern")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Pattern)) } if m.MaxItems != 0 { - info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("maxItems")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxItems)) } if m.MinItems != 0 { - info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems)) } if m.UniqueItems != false { - info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems)) } if len(m.Enum) != 0 { - items := make([]interface{}, 0) + items := compiler.NewSequenceNode() for _, item := range m.Enum { - items = append(items, item.ToRawInfo()) + items.Content = append(items.Content, item.ToRawInfo()) } - info = append(info, yaml.MapItem{Key: "enum", Value: items}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("enum")) + info.Content = append(info.Content, items) } - // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} if m.MultipleOf != 0.0 { - info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("multipleOf")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.MultipleOf)) } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) } } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} return info } // ToRawInfo returns a description of Headers suitable for JSON or YAML export. 
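Numeric and boolean fields such as maxItems, exclusiveMaximum and multipleOf go through typed helpers (compiler.NewScalarNodeForInt/Bool/Float in the hunks above). A self-contained sketch of what such helpers plausibly do with gopkg.in/yaml.v3 scalar tags; boolNode, intNode and floatNode are illustrative stand-ins, not the actual compiler API:

package main

import (
	"fmt"
	"strconv"

	"gopkg.in/yaml.v3"
)

// Typed scalar constructors: each encodes the Go value as a YAML scalar with
// the matching core tag so it round-trips as bool/int/float, not as a string.
func boolNode(b bool) *yaml.Node {
	return &yaml.Node{Kind: yaml.ScalarNode, Tag: "!!bool", Value: strconv.FormatBool(b)}
}
func intNode(i int64) *yaml.Node {
	return &yaml.Node{Kind: yaml.ScalarNode, Tag: "!!int", Value: strconv.FormatInt(i, 10)}
}
func floatNode(f float64) *yaml.Node {
	return &yaml.Node{Kind: yaml.ScalarNode, Tag: "!!float", Value: strconv.FormatFloat(f, 'g', -1, 64)}
}
func strNode(s string) *yaml.Node {
	return &yaml.Node{Kind: yaml.ScalarNode, Tag: "!!str", Value: s}
}

func main() {
	header := &yaml.Node{Kind: yaml.MappingNode}
	header.Content = append(header.Content, strNode("maxItems"), intNode(10))
	header.Content = append(header.Content, strNode("exclusiveMaximum"), boolNode(true))
	header.Content = append(header.Content, strNode("multipleOf"), floatNode(0.5))

	out, err := yaml.Marshal(header)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// maxItems: 10
	// exclusiveMaximum: true
	// multipleOf: 0.5
}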
-func (m *Headers) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *Headers) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } if m.AdditionalProperties != nil { for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) } } - // &{Name:additionalProperties Type:NamedHeader StringEnumValues:[] MapType:Header Repeated:true Pattern: Implicit:true Description:} return info } // ToRawInfo returns a description of Info suitable for JSON or YAML export. -func (m *Info) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *Info) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } // always include this required field. - info = append(info, yaml.MapItem{Key: "title", Value: m.Title}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("title")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Title)) // always include this required field. - info = append(info, yaml.MapItem{Key: "version", Value: m.Version}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("version")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Version)) if m.Description != "" { - info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) } if m.TermsOfService != "" { - info = append(info, yaml.MapItem{Key: "termsOfService", Value: m.TermsOfService}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("termsOfService")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.TermsOfService)) } if m.Contact != nil { - info = append(info, yaml.MapItem{Key: "contact", Value: m.Contact.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("contact")) + info.Content = append(info.Content, m.Contact.ToRawInfo()) } - // &{Name:contact Type:Contact StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.License != nil { - info = append(info, yaml.MapItem{Key: "license", Value: m.License.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("license")) + info.Content = append(info.Content, m.License.ToRawInfo()) } - // &{Name:license Type:License StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) } } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} return info } // ToRawInfo returns a description of ItemsItem suitable for JSON or YAML export. 
-func (m *ItemsItem) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *ItemsItem) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } if len(m.Schema) != 0 { - items := make([]interface{}, 0) + items := compiler.NewSequenceNode() for _, item := range m.Schema { - items = append(items, item.ToRawInfo()) + items.Content = append(items.Content, item.ToRawInfo()) } - info = append(info, yaml.MapItem{Key: "schema", Value: items}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("schema")) + info.Content = append(info.Content, items) } - // &{Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} return info } // ToRawInfo returns a description of JsonReference suitable for JSON or YAML export. -func (m *JsonReference) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *JsonReference) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } // always include this required field. - info = append(info, yaml.MapItem{Key: "$ref", Value: m.XRef}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("$ref")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.XRef)) if m.Description != "" { - info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) } return info } // ToRawInfo returns a description of License suitable for JSON or YAML export. -func (m *License) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *License) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } // always include this required field. - info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) if m.Url != "" { - info = append(info, yaml.MapItem{Key: "url", Value: m.Url}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("url")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Url)) } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) } } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} return info } // ToRawInfo returns a description of NamedAny suitable for JSON or YAML export. -func (m *NamedAny) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *NamedAny) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } if m.Name != "" { - info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) } // &{Name:value Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} return info } // ToRawInfo returns a description of NamedHeader suitable for JSON or YAML export. 
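Repeated fields (schemes, enum, the security and tags lists, ItemsItem.schema above) become sequence nodes: the generated code builds a SequenceNode and appends one child node per element, the role of compiler.NewSequenceNode and NewSequenceNodeForStringArray in these hunks. A short sketch of the same idea with gopkg.in/yaml.v3, using invented example data:

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func strNode(s string) *yaml.Node {
	return &yaml.Node{Kind: yaml.ScalarNode, Tag: "!!str", Value: s}
}

func main() {
	// One child node per element, analogous to NewSequenceNodeForStringArray.
	schemes := &yaml.Node{Kind: yaml.SequenceNode}
	for _, s := range []string{"https", "http"} {
		schemes.Content = append(schemes.Content, strNode(s))
	}

	doc := &yaml.Node{Kind: yaml.MappingNode}
	doc.Content = append(doc.Content, strNode("schemes"), schemes)

	out, err := yaml.Marshal(doc)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // prints the schemes list as a YAML sequence
}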
-func (m *NamedHeader) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *NamedHeader) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } if m.Name != "" { - info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) } // &{Name:value Type:Header StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} return info } // ToRawInfo returns a description of NamedParameter suitable for JSON or YAML export. -func (m *NamedParameter) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *NamedParameter) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } if m.Name != "" { - info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) } // &{Name:value Type:Parameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} return info } // ToRawInfo returns a description of NamedPathItem suitable for JSON or YAML export. -func (m *NamedPathItem) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *NamedPathItem) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } if m.Name != "" { - info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) } // &{Name:value Type:PathItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} return info } // ToRawInfo returns a description of NamedResponse suitable for JSON or YAML export. -func (m *NamedResponse) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *NamedResponse) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } if m.Name != "" { - info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) } // &{Name:value Type:Response StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} return info } // ToRawInfo returns a description of NamedResponseValue suitable for JSON or YAML export. -func (m *NamedResponseValue) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *NamedResponseValue) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } if m.Name != "" { - info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) } // &{Name:value Type:ResponseValue StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} return info } // ToRawInfo returns a description of NamedSchema suitable for JSON or YAML export. 
-func (m *NamedSchema) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *NamedSchema) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } if m.Name != "" { - info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) } // &{Name:value Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} return info } // ToRawInfo returns a description of NamedSecurityDefinitionsItem suitable for JSON or YAML export. -func (m *NamedSecurityDefinitionsItem) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *NamedSecurityDefinitionsItem) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } if m.Name != "" { - info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) } // &{Name:value Type:SecurityDefinitionsItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} return info } // ToRawInfo returns a description of NamedString suitable for JSON or YAML export. -func (m *NamedString) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *NamedString) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } if m.Name != "" { - info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) } if m.Value != "" { - info = append(info, yaml.MapItem{Key: "value", Value: m.Value}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("value")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Value)) } return info } // ToRawInfo returns a description of NamedStringArray suitable for JSON or YAML export. -func (m *NamedStringArray) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *NamedStringArray) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } if m.Name != "" { - info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) } // &{Name:value Type:StringArray StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} return info } // ToRawInfo returns a description of NonBodyParameter suitable for JSON or YAML export. -func (m *NonBodyParameter) ToRawInfo() interface{} { +func (m *NonBodyParameter) ToRawInfo() *yaml.Node { // ONE OF WRAPPER // NonBodyParameter // {Name:headerParameterSubSchema Type:HeaderParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} @@ -7888,122 +7720,139 @@ func (m *NonBodyParameter) ToRawInfo() interface{} { } // ToRawInfo returns a description of Oauth2AccessCodeSecurity suitable for JSON or YAML export. -func (m *Oauth2AccessCodeSecurity) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *Oauth2AccessCodeSecurity) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } // always include this required field. 
- info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) // always include this required field. - info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("flow")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Flow)) if m.Scopes != nil { - info = append(info, yaml.MapItem{Key: "scopes", Value: m.Scopes.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("scopes")) + info.Content = append(info.Content, m.Scopes.ToRawInfo()) } - // &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} // always include this required field. - info = append(info, yaml.MapItem{Key: "authorizationUrl", Value: m.AuthorizationUrl}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("authorizationUrl")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.AuthorizationUrl)) // always include this required field. - info = append(info, yaml.MapItem{Key: "tokenUrl", Value: m.TokenUrl}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("tokenUrl")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.TokenUrl)) if m.Description != "" { - info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) } } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} return info } // ToRawInfo returns a description of Oauth2ApplicationSecurity suitable for JSON or YAML export. -func (m *Oauth2ApplicationSecurity) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *Oauth2ApplicationSecurity) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } // always include this required field. - info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) // always include this required field. - info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("flow")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Flow)) if m.Scopes != nil { - info = append(info, yaml.MapItem{Key: "scopes", Value: m.Scopes.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("scopes")) + info.Content = append(info.Content, m.Scopes.ToRawInfo()) } - // &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} // always include this required field. 
- info = append(info, yaml.MapItem{Key: "tokenUrl", Value: m.TokenUrl}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("tokenUrl")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.TokenUrl)) if m.Description != "" { - info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) } } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} return info } // ToRawInfo returns a description of Oauth2ImplicitSecurity suitable for JSON or YAML export. -func (m *Oauth2ImplicitSecurity) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *Oauth2ImplicitSecurity) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } // always include this required field. - info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) // always include this required field. - info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("flow")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Flow)) if m.Scopes != nil { - info = append(info, yaml.MapItem{Key: "scopes", Value: m.Scopes.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("scopes")) + info.Content = append(info.Content, m.Scopes.ToRawInfo()) } - // &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} // always include this required field. - info = append(info, yaml.MapItem{Key: "authorizationUrl", Value: m.AuthorizationUrl}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("authorizationUrl")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.AuthorizationUrl)) if m.Description != "" { - info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) } } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} return info } // ToRawInfo returns a description of Oauth2PasswordSecurity suitable for JSON or YAML export. -func (m *Oauth2PasswordSecurity) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *Oauth2PasswordSecurity) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } // always include this required field. 
- info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) // always include this required field. - info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("flow")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Flow)) if m.Scopes != nil { - info = append(info, yaml.MapItem{Key: "scopes", Value: m.Scopes.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("scopes")) + info.Content = append(info.Content, m.Scopes.ToRawInfo()) } - // &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} // always include this required field. - info = append(info, yaml.MapItem{Key: "tokenUrl", Value: m.TokenUrl}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("tokenUrl")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.TokenUrl)) if m.Description != "" { - info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) } } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} return info } // ToRawInfo returns a description of Oauth2Scopes suitable for JSON or YAML export. -func (m *Oauth2Scopes) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *Oauth2Scopes) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } @@ -8012,69 +7861,77 @@ func (m *Oauth2Scopes) ToRawInfo() interface{} { } // ToRawInfo returns a description of Operation suitable for JSON or YAML export. 
-func (m *Operation) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *Operation) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } if len(m.Tags) != 0 { - info = append(info, yaml.MapItem{Key: "tags", Value: m.Tags}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("tags")) + info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Tags)) } if m.Summary != "" { - info = append(info, yaml.MapItem{Key: "summary", Value: m.Summary}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("summary")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Summary)) } if m.Description != "" { - info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) } if m.ExternalDocs != nil { - info = append(info, yaml.MapItem{Key: "externalDocs", Value: m.ExternalDocs.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("externalDocs")) + info.Content = append(info.Content, m.ExternalDocs.ToRawInfo()) } - // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.OperationId != "" { - info = append(info, yaml.MapItem{Key: "operationId", Value: m.OperationId}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("operationId")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.OperationId)) } if len(m.Produces) != 0 { - info = append(info, yaml.MapItem{Key: "produces", Value: m.Produces}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("produces")) + info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Produces)) } if len(m.Consumes) != 0 { - info = append(info, yaml.MapItem{Key: "consumes", Value: m.Consumes}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("consumes")) + info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Consumes)) } if len(m.Parameters) != 0 { - items := make([]interface{}, 0) + items := compiler.NewSequenceNode() for _, item := range m.Parameters { - items = append(items, item.ToRawInfo()) + items.Content = append(items.Content, item.ToRawInfo()) } - info = append(info, yaml.MapItem{Key: "parameters", Value: items}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("parameters")) + info.Content = append(info.Content, items) } - // &{Name:parameters Type:ParametersItem StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:The parameters needed to send a valid API call.} // always include this required field. 
- info = append(info, yaml.MapItem{Key: "responses", Value: m.Responses.ToRawInfo()}) - // &{Name:responses Type:Responses StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + info.Content = append(info.Content, compiler.NewScalarNodeForString("responses")) + info.Content = append(info.Content, m.Responses.ToRawInfo()) if len(m.Schemes) != 0 { - info = append(info, yaml.MapItem{Key: "schemes", Value: m.Schemes}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("schemes")) + info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Schemes)) } if m.Deprecated != false { - info = append(info, yaml.MapItem{Key: "deprecated", Value: m.Deprecated}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("deprecated")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Deprecated)) } if len(m.Security) != 0 { - items := make([]interface{}, 0) + items := compiler.NewSequenceNode() for _, item := range m.Security { - items = append(items, item.ToRawInfo()) + items.Content = append(items.Content, item.ToRawInfo()) } - info = append(info, yaml.MapItem{Key: "security", Value: items}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("security")) + info.Content = append(info.Content, items) } - // &{Name:security Type:SecurityRequirement StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) } } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} return info } // ToRawInfo returns a description of Parameter suitable for JSON or YAML export. -func (m *Parameter) ToRawInfo() interface{} { +func (m *Parameter) ToRawInfo() *yaml.Node { // ONE OF WRAPPER // Parameter // {Name:bodyParameter Type:BodyParameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} @@ -8091,22 +7948,22 @@ func (m *Parameter) ToRawInfo() interface{} { } // ToRawInfo returns a description of ParameterDefinitions suitable for JSON or YAML export. -func (m *ParameterDefinitions) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *ParameterDefinitions) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } if m.AdditionalProperties != nil { for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) } } - // &{Name:additionalProperties Type:NamedParameter StringEnumValues:[] MapType:Parameter Repeated:true Pattern: Implicit:true Description:} return info } // ToRawInfo returns a description of ParametersItem suitable for JSON or YAML export. 
-func (m *ParametersItem) ToRawInfo() interface{} { +func (m *ParametersItem) ToRawInfo() *yaml.Node { // ONE OF WRAPPER // ParametersItem // {Name:parameter Type:Parameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} @@ -8123,386 +7980,439 @@ func (m *ParametersItem) ToRawInfo() interface{} { } // ToRawInfo returns a description of PathItem suitable for JSON or YAML export. -func (m *PathItem) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *PathItem) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } if m.XRef != "" { - info = append(info, yaml.MapItem{Key: "$ref", Value: m.XRef}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("$ref")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.XRef)) } if m.Get != nil { - info = append(info, yaml.MapItem{Key: "get", Value: m.Get.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("get")) + info.Content = append(info.Content, m.Get.ToRawInfo()) } - // &{Name:get Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Put != nil { - info = append(info, yaml.MapItem{Key: "put", Value: m.Put.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("put")) + info.Content = append(info.Content, m.Put.ToRawInfo()) } - // &{Name:put Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Post != nil { - info = append(info, yaml.MapItem{Key: "post", Value: m.Post.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("post")) + info.Content = append(info.Content, m.Post.ToRawInfo()) } - // &{Name:post Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Delete != nil { - info = append(info, yaml.MapItem{Key: "delete", Value: m.Delete.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("delete")) + info.Content = append(info.Content, m.Delete.ToRawInfo()) } - // &{Name:delete Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Options != nil { - info = append(info, yaml.MapItem{Key: "options", Value: m.Options.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("options")) + info.Content = append(info.Content, m.Options.ToRawInfo()) } - // &{Name:options Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Head != nil { - info = append(info, yaml.MapItem{Key: "head", Value: m.Head.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("head")) + info.Content = append(info.Content, m.Head.ToRawInfo()) } - // &{Name:head Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Patch != nil { - info = append(info, yaml.MapItem{Key: "patch", Value: m.Patch.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("patch")) + info.Content = append(info.Content, m.Patch.ToRawInfo()) } - // &{Name:patch Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if len(m.Parameters) != 0 { - items := make([]interface{}, 0) + items := compiler.NewSequenceNode() for _, item := range m.Parameters { - items = append(items, item.ToRawInfo()) + items.Content = append(items.Content, item.ToRawInfo()) } - info = 
append(info, yaml.MapItem{Key: "parameters", Value: items}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("parameters")) + info.Content = append(info.Content, items) } - // &{Name:parameters Type:ParametersItem StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:The parameters needed to send a valid API call.} if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) } } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} return info } // ToRawInfo returns a description of PathParameterSubSchema suitable for JSON or YAML export. -func (m *PathParameterSubSchema) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *PathParameterSubSchema) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } // always include this required field. - info = append(info, yaml.MapItem{Key: "required", Value: m.Required}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("required")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Required)) if m.In != "" { - info = append(info, yaml.MapItem{Key: "in", Value: m.In}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("in")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.In)) } if m.Description != "" { - info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) } if m.Name != "" { - info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) } if m.Type != "" { - info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) } if m.Format != "" { - info = append(info, yaml.MapItem{Key: "format", Value: m.Format}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("format")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Format)) } if m.Items != nil { - info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("items")) + info.Content = append(info.Content, m.Items.ToRawInfo()) } - // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.CollectionFormat != "" { - info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("collectionFormat")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.CollectionFormat)) } if m.Default != nil { - info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("default")) + info.Content = append(info.Content, m.Default.ToRawInfo()) } - // 
&{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Maximum != 0.0 { - info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum)) } if m.ExclusiveMaximum != false { - info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum)) } if m.Minimum != 0.0 { - info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum)) } if m.ExclusiveMinimum != false { - info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum)) } if m.MaxLength != 0 { - info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("maxLength")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxLength)) } if m.MinLength != 0 { - info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("minLength")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinLength)) } if m.Pattern != "" { - info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("pattern")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Pattern)) } if m.MaxItems != 0 { - info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("maxItems")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxItems)) } if m.MinItems != 0 { - info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems)) } if m.UniqueItems != false { - info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems)) } if len(m.Enum) != 0 { - items := make([]interface{}, 0) + items := compiler.NewSequenceNode() for _, item := range m.Enum { - items = append(items, item.ToRawInfo()) + items.Content = append(items.Content, item.ToRawInfo()) } - info = append(info, yaml.MapItem{Key: "enum", Value: items}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("enum")) + info.Content = append(info.Content, items) } - // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} if m.MultipleOf != 0.0 { - info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("multipleOf")) + info.Content = append(info.Content, 
compiler.NewScalarNodeForFloat(m.MultipleOf)) } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) } } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} return info } // ToRawInfo returns a description of Paths suitable for JSON or YAML export. -func (m *Paths) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *Paths) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) } } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} if m.Path != nil { for _, item := range m.Path { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) } } - // &{Name:Path Type:NamedPathItem StringEnumValues:[] MapType:PathItem Repeated:true Pattern:^/ Implicit:true Description:} return info } // ToRawInfo returns a description of PrimitivesItems suitable for JSON or YAML export. -func (m *PrimitivesItems) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *PrimitivesItems) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } if m.Type != "" { - info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) } if m.Format != "" { - info = append(info, yaml.MapItem{Key: "format", Value: m.Format}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("format")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Format)) } if m.Items != nil { - info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("items")) + info.Content = append(info.Content, m.Items.ToRawInfo()) } - // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.CollectionFormat != "" { - info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("collectionFormat")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.CollectionFormat)) } if m.Default != nil { - info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("default")) + info.Content = append(info.Content, m.Default.ToRawInfo()) } - // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Maximum != 0.0 { - info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum}) + info.Content = append(info.Content, 
compiler.NewScalarNodeForString("maximum")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum)) } if m.ExclusiveMaximum != false { - info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum)) } if m.Minimum != 0.0 { - info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum)) } if m.ExclusiveMinimum != false { - info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum)) } if m.MaxLength != 0 { - info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("maxLength")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxLength)) } if m.MinLength != 0 { - info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("minLength")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinLength)) } if m.Pattern != "" { - info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("pattern")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Pattern)) } if m.MaxItems != 0 { - info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("maxItems")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxItems)) } if m.MinItems != 0 { - info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems)) } if m.UniqueItems != false { - info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems)) } if len(m.Enum) != 0 { - items := make([]interface{}, 0) + items := compiler.NewSequenceNode() for _, item := range m.Enum { - items = append(items, item.ToRawInfo()) + items.Content = append(items.Content, item.ToRawInfo()) } - info = append(info, yaml.MapItem{Key: "enum", Value: items}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("enum")) + info.Content = append(info.Content, items) } - // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} if m.MultipleOf != 0.0 { - info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("multipleOf")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.MultipleOf)) } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + info.Content = append(info.Content, 
compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) } } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} return info } // ToRawInfo returns a description of Properties suitable for JSON or YAML export. -func (m *Properties) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *Properties) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } if m.AdditionalProperties != nil { for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) } } - // &{Name:additionalProperties Type:NamedSchema StringEnumValues:[] MapType:Schema Repeated:true Pattern: Implicit:true Description:} return info } // ToRawInfo returns a description of QueryParameterSubSchema suitable for JSON or YAML export. -func (m *QueryParameterSubSchema) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *QueryParameterSubSchema) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } if m.Required != false { - info = append(info, yaml.MapItem{Key: "required", Value: m.Required}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("required")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Required)) } if m.In != "" { - info = append(info, yaml.MapItem{Key: "in", Value: m.In}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("in")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.In)) } if m.Description != "" { - info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) } if m.Name != "" { - info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) } if m.AllowEmptyValue != false { - info = append(info, yaml.MapItem{Key: "allowEmptyValue", Value: m.AllowEmptyValue}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("allowEmptyValue")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.AllowEmptyValue)) } if m.Type != "" { - info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) } if m.Format != "" { - info = append(info, yaml.MapItem{Key: "format", Value: m.Format}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("format")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Format)) } if m.Items != nil { - info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("items")) + info.Content = append(info.Content, m.Items.ToRawInfo()) } - // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.CollectionFormat != "" { - info = append(info, yaml.MapItem{Key: "collectionFormat", Value: 
m.CollectionFormat}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("collectionFormat")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.CollectionFormat)) } if m.Default != nil { - info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("default")) + info.Content = append(info.Content, m.Default.ToRawInfo()) } - // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Maximum != 0.0 { - info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum)) } if m.ExclusiveMaximum != false { - info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum)) } if m.Minimum != 0.0 { - info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum)) } if m.ExclusiveMinimum != false { - info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum)) } if m.MaxLength != 0 { - info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("maxLength")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxLength)) } if m.MinLength != 0 { - info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("minLength")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinLength)) } if m.Pattern != "" { - info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("pattern")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Pattern)) } if m.MaxItems != 0 { - info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("maxItems")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxItems)) } if m.MinItems != 0 { - info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems)) } if m.UniqueItems != false { - info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems)) } if len(m.Enum) != 0 { - items := make([]interface{}, 0) + items := compiler.NewSequenceNode() for _, item := range m.Enum { - items = append(items, item.ToRawInfo()) + items.Content = append(items.Content, item.ToRawInfo()) } - info = append(info, yaml.MapItem{Key: "enum", Value: items}) + 
info.Content = append(info.Content, compiler.NewScalarNodeForString("enum")) + info.Content = append(info.Content, items) } - // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} if m.MultipleOf != 0.0 { - info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("multipleOf")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.MultipleOf)) } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) } } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} return info } // ToRawInfo returns a description of Response suitable for JSON or YAML export. -func (m *Response) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *Response) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } // always include this required field. - info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) if m.Schema != nil { - info = append(info, yaml.MapItem{Key: "schema", Value: m.Schema.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("schema")) + info.Content = append(info.Content, m.Schema.ToRawInfo()) } - // &{Name:schema Type:SchemaItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Headers != nil { - info = append(info, yaml.MapItem{Key: "headers", Value: m.Headers.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("headers")) + info.Content = append(info.Content, m.Headers.ToRawInfo()) } - // &{Name:headers Type:Headers StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Examples != nil { - info = append(info, yaml.MapItem{Key: "examples", Value: m.Examples.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("examples")) + info.Content = append(info.Content, m.Examples.ToRawInfo()) } - // &{Name:examples Type:Examples StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) } } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} return info } // ToRawInfo returns a description of ResponseDefinitions suitable for JSON or YAML export. 
-func (m *ResponseDefinitions) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *ResponseDefinitions) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } if m.AdditionalProperties != nil { for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) } } - // &{Name:additionalProperties Type:NamedResponse StringEnumValues:[] MapType:Response Repeated:true Pattern: Implicit:true Description:} return info } // ToRawInfo returns a description of ResponseValue suitable for JSON or YAML export. -func (m *ResponseValue) ToRawInfo() interface{} { +func (m *ResponseValue) ToRawInfo() *yaml.Node { // ONE OF WRAPPER // ResponseValue // {Name:response Type:Response StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} @@ -8519,159 +8429,183 @@ func (m *ResponseValue) ToRawInfo() interface{} { } // ToRawInfo returns a description of Responses suitable for JSON or YAML export. -func (m *Responses) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *Responses) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } if m.ResponseCode != nil { for _, item := range m.ResponseCode { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) } } - // &{Name:ResponseCode Type:NamedResponseValue StringEnumValues:[] MapType:ResponseValue Repeated:true Pattern:^([0-9]{3})$|^(default)$ Implicit:true Description:} if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) } } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} return info } // ToRawInfo returns a description of Schema suitable for JSON or YAML export. 
-func (m *Schema) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *Schema) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } if m.XRef != "" { - info = append(info, yaml.MapItem{Key: "$ref", Value: m.XRef}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("$ref")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.XRef)) } if m.Format != "" { - info = append(info, yaml.MapItem{Key: "format", Value: m.Format}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("format")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Format)) } if m.Title != "" { - info = append(info, yaml.MapItem{Key: "title", Value: m.Title}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("title")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Title)) } if m.Description != "" { - info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) } if m.Default != nil { - info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("default")) + info.Content = append(info.Content, m.Default.ToRawInfo()) } - // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.MultipleOf != 0.0 { - info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("multipleOf")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.MultipleOf)) } if m.Maximum != 0.0 { - info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum)) } if m.ExclusiveMaximum != false { - info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum)) } if m.Minimum != 0.0 { - info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum)) } if m.ExclusiveMinimum != false { - info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum)) } if m.MaxLength != 0 { - info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("maxLength")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxLength)) } if m.MinLength != 0 { - info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("minLength")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinLength)) } if m.Pattern != "" { - info = append(info, yaml.MapItem{Key: "pattern", Value: 
m.Pattern}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("pattern")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Pattern)) } if m.MaxItems != 0 { - info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("maxItems")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxItems)) } if m.MinItems != 0 { - info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems)) } if m.UniqueItems != false { - info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems)) } if m.MaxProperties != 0 { - info = append(info, yaml.MapItem{Key: "maxProperties", Value: m.MaxProperties}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("maxProperties")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxProperties)) } if m.MinProperties != 0 { - info = append(info, yaml.MapItem{Key: "minProperties", Value: m.MinProperties}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("minProperties")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinProperties)) } if len(m.Required) != 0 { - info = append(info, yaml.MapItem{Key: "required", Value: m.Required}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("required")) + info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Required)) } if len(m.Enum) != 0 { - items := make([]interface{}, 0) + items := compiler.NewSequenceNode() for _, item := range m.Enum { - items = append(items, item.ToRawInfo()) + items.Content = append(items.Content, item.ToRawInfo()) } - info = append(info, yaml.MapItem{Key: "enum", Value: items}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("enum")) + info.Content = append(info.Content, items) } - // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} if m.AdditionalProperties != nil { - info = append(info, yaml.MapItem{Key: "additionalProperties", Value: m.AdditionalProperties.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("additionalProperties")) + info.Content = append(info.Content, m.AdditionalProperties.ToRawInfo()) } - // &{Name:additionalProperties Type:AdditionalPropertiesItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Type != nil { if len(m.Type.Value) == 1 { - info = append(info, yaml.MapItem{Key: "type", Value: m.Type.Value[0]}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type.Value[0])) } else { - info = append(info, yaml.MapItem{Key: "type", Value: m.Type.Value}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) + info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Type.Value)) } } - // &{Name:type Type:TypeItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Items != nil { - items := make([]interface{}, 0) + items := compiler.NewSequenceNode() for _, item := 
range m.Items.Schema { - items = append(items, item.ToRawInfo()) + items.Content = append(items.Content, item.ToRawInfo()) + } + if len(items.Content) == 1 { + items = items.Content[0] } - info = append(info, yaml.MapItem{Key: "items", Value: items[0]}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("items")) + info.Content = append(info.Content, items) } - // &{Name:items Type:ItemsItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if len(m.AllOf) != 0 { - items := make([]interface{}, 0) + items := compiler.NewSequenceNode() for _, item := range m.AllOf { - items = append(items, item.ToRawInfo()) + items.Content = append(items.Content, item.ToRawInfo()) } - info = append(info, yaml.MapItem{Key: "allOf", Value: items}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("allOf")) + info.Content = append(info.Content, items) } - // &{Name:allOf Type:Schema StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} if m.Properties != nil { - info = append(info, yaml.MapItem{Key: "properties", Value: m.Properties.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("properties")) + info.Content = append(info.Content, m.Properties.ToRawInfo()) } - // &{Name:properties Type:Properties StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Discriminator != "" { - info = append(info, yaml.MapItem{Key: "discriminator", Value: m.Discriminator}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("discriminator")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Discriminator)) } if m.ReadOnly != false { - info = append(info, yaml.MapItem{Key: "readOnly", Value: m.ReadOnly}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("readOnly")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ReadOnly)) } if m.Xml != nil { - info = append(info, yaml.MapItem{Key: "xml", Value: m.Xml.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("xml")) + info.Content = append(info.Content, m.Xml.ToRawInfo()) } - // &{Name:xml Type:Xml StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.ExternalDocs != nil { - info = append(info, yaml.MapItem{Key: "externalDocs", Value: m.ExternalDocs.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("externalDocs")) + info.Content = append(info.Content, m.ExternalDocs.ToRawInfo()) } - // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Example != nil { - info = append(info, yaml.MapItem{Key: "example", Value: m.Example.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("example")) + info.Content = append(info.Content, m.Example.ToRawInfo()) } - // &{Name:example Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) } } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} return info } // ToRawInfo returns a description of SchemaItem 
suitable for JSON or YAML export. -func (m *SchemaItem) ToRawInfo() interface{} { +func (m *SchemaItem) ToRawInfo() *yaml.Node { // ONE OF WRAPPER // SchemaItem // {Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} @@ -8688,22 +8622,22 @@ func (m *SchemaItem) ToRawInfo() interface{} { } // ToRawInfo returns a description of SecurityDefinitions suitable for JSON or YAML export. -func (m *SecurityDefinitions) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *SecurityDefinitions) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } if m.AdditionalProperties != nil { for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) } } - // &{Name:additionalProperties Type:NamedSecurityDefinitionsItem StringEnumValues:[] MapType:SecurityDefinitionsItem Repeated:true Pattern: Implicit:true Description:} return info } // ToRawInfo returns a description of SecurityDefinitionsItem suitable for JSON or YAML export. -func (m *SecurityDefinitionsItem) ToRawInfo() interface{} { +func (m *SecurityDefinitionsItem) ToRawInfo() *yaml.Node { // ONE OF WRAPPER // SecurityDefinitionsItem // {Name:basicAuthenticationSecurity Type:BasicAuthenticationSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} @@ -8740,103 +8674,111 @@ func (m *SecurityDefinitionsItem) ToRawInfo() interface{} { } // ToRawInfo returns a description of SecurityRequirement suitable for JSON or YAML export. -func (m *SecurityRequirement) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *SecurityRequirement) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } if m.AdditionalProperties != nil { for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) } } - // &{Name:additionalProperties Type:NamedStringArray StringEnumValues:[] MapType:StringArray Repeated:true Pattern: Implicit:true Description:} return info } // ToRawInfo returns a description of StringArray suitable for JSON or YAML export. -func (m *StringArray) ToRawInfo() interface{} { - return m.Value +func (m *StringArray) ToRawInfo() *yaml.Node { + return compiler.NewSequenceNodeForStringArray(m.Value) } // ToRawInfo returns a description of Tag suitable for JSON or YAML export. -func (m *Tag) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *Tag) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } // always include this required field. 
- info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) if m.Description != "" { - info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) } if m.ExternalDocs != nil { - info = append(info, yaml.MapItem{Key: "externalDocs", Value: m.ExternalDocs.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("externalDocs")) + info.Content = append(info.Content, m.ExternalDocs.ToRawInfo()) } - // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) } } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} return info } // ToRawInfo returns a description of TypeItem suitable for JSON or YAML export. -func (m *TypeItem) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *TypeItem) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } if len(m.Value) != 0 { - info = append(info, yaml.MapItem{Key: "value", Value: m.Value}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("value")) + info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Value)) } return info } // ToRawInfo returns a description of VendorExtension suitable for JSON or YAML export. -func (m *VendorExtension) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *VendorExtension) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } if m.AdditionalProperties != nil { for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) } } - // &{Name:additionalProperties Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern: Implicit:true Description:} return info } // ToRawInfo returns a description of Xml suitable for JSON or YAML export. 
-func (m *Xml) ToRawInfo() interface{} { - info := yaml.MapSlice{} +func (m *Xml) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() if m == nil { return info } if m.Name != "" { - info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) } if m.Namespace != "" { - info = append(info, yaml.MapItem{Key: "namespace", Value: m.Namespace}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("namespace")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Namespace)) } if m.Prefix != "" { - info = append(info, yaml.MapItem{Key: "prefix", Value: m.Prefix}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("prefix")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Prefix)) } if m.Attribute != false { - info = append(info, yaml.MapItem{Key: "attribute", Value: m.Attribute}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("attribute")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Attribute)) } if m.Wrapped != false { - info = append(info, yaml.MapItem{Key: "wrapped", Value: m.Wrapped}) + info.Content = append(info.Content, compiler.NewScalarNodeForString("wrapped")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Wrapped)) } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) } } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} return info } diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go index 6199e7cb3..559ddea1a 100644 --- a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go +++ b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go @@ -1,77 +1,90 @@ +// Copyright 2020 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// THIS FILE IS AUTOMATICALLY GENERATED. + // Code generated by protoc-gen-go. DO NOT EDIT. -// source: OpenAPIv2/OpenAPIv2.proto +// versions: +// protoc-gen-go v1.23.0 +// protoc v3.12.3 +// source: openapiv2/OpenAPIv2.proto package openapi_v2 import ( - fmt "fmt" proto "github.com/golang/protobuf/proto" any "github.com/golang/protobuf/ptypes/any" - math "math" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 type AdditionalPropertiesItem struct { - // Types that are valid to be assigned to Oneof: + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Oneof: // *AdditionalPropertiesItem_Schema // *AdditionalPropertiesItem_Boolean - Oneof isAdditionalPropertiesItem_Oneof `protobuf_oneof:"oneof"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Oneof isAdditionalPropertiesItem_Oneof `protobuf_oneof:"oneof"` } -func (m *AdditionalPropertiesItem) Reset() { *m = AdditionalPropertiesItem{} } -func (m *AdditionalPropertiesItem) String() string { return proto.CompactTextString(m) } -func (*AdditionalPropertiesItem) ProtoMessage() {} -func (*AdditionalPropertiesItem) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{0} +func (x *AdditionalPropertiesItem) Reset() { + *x = AdditionalPropertiesItem{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *AdditionalPropertiesItem) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_AdditionalPropertiesItem.Unmarshal(m, b) -} -func (m *AdditionalPropertiesItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_AdditionalPropertiesItem.Marshal(b, m, deterministic) -} -func (m *AdditionalPropertiesItem) XXX_Merge(src proto.Message) { - xxx_messageInfo_AdditionalPropertiesItem.Merge(m, src) -} -func (m *AdditionalPropertiesItem) XXX_Size() int { - return xxx_messageInfo_AdditionalPropertiesItem.Size(m) +func (x *AdditionalPropertiesItem) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *AdditionalPropertiesItem) XXX_DiscardUnknown() { - xxx_messageInfo_AdditionalPropertiesItem.DiscardUnknown(m) -} - -var xxx_messageInfo_AdditionalPropertiesItem proto.InternalMessageInfo -type isAdditionalPropertiesItem_Oneof interface { - isAdditionalPropertiesItem_Oneof() -} +func (*AdditionalPropertiesItem) ProtoMessage() {} -type AdditionalPropertiesItem_Schema struct { - Schema *Schema `protobuf:"bytes,1,opt,name=schema,proto3,oneof"` +func (x *AdditionalPropertiesItem) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -type AdditionalPropertiesItem_Boolean struct { - Boolean bool `protobuf:"varint,2,opt,name=boolean,proto3,oneof"` +// Deprecated: Use 
AdditionalPropertiesItem.ProtoReflect.Descriptor instead. +func (*AdditionalPropertiesItem) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{0} } -func (*AdditionalPropertiesItem_Schema) isAdditionalPropertiesItem_Oneof() {} - -func (*AdditionalPropertiesItem_Boolean) isAdditionalPropertiesItem_Oneof() {} - func (m *AdditionalPropertiesItem) GetOneof() isAdditionalPropertiesItem_Oneof { if m != nil { return m.Oneof @@ -79,202 +92,238 @@ func (m *AdditionalPropertiesItem) GetOneof() isAdditionalPropertiesItem_Oneof { return nil } -func (m *AdditionalPropertiesItem) GetSchema() *Schema { - if x, ok := m.GetOneof().(*AdditionalPropertiesItem_Schema); ok { +func (x *AdditionalPropertiesItem) GetSchema() *Schema { + if x, ok := x.GetOneof().(*AdditionalPropertiesItem_Schema); ok { return x.Schema } return nil } -func (m *AdditionalPropertiesItem) GetBoolean() bool { - if x, ok := m.GetOneof().(*AdditionalPropertiesItem_Boolean); ok { +func (x *AdditionalPropertiesItem) GetBoolean() bool { + if x, ok := x.GetOneof().(*AdditionalPropertiesItem_Boolean); ok { return x.Boolean } return false } -// XXX_OneofWrappers is for the internal use of the proto package. -func (*AdditionalPropertiesItem) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*AdditionalPropertiesItem_Schema)(nil), - (*AdditionalPropertiesItem_Boolean)(nil), - } +type isAdditionalPropertiesItem_Oneof interface { + isAdditionalPropertiesItem_Oneof() } -type Any struct { - Value *any.Any `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` - Yaml string `protobuf:"bytes,2,opt,name=yaml,proto3" json:"yaml,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +type AdditionalPropertiesItem_Schema struct { + Schema *Schema `protobuf:"bytes,1,opt,name=schema,proto3,oneof"` } -func (m *Any) Reset() { *m = Any{} } -func (m *Any) String() string { return proto.CompactTextString(m) } -func (*Any) ProtoMessage() {} -func (*Any) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{1} +type AdditionalPropertiesItem_Boolean struct { + Boolean bool `protobuf:"varint,2,opt,name=boolean,proto3,oneof"` } -func (m *Any) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Any.Unmarshal(m, b) -} -func (m *Any) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Any.Marshal(b, m, deterministic) +func (*AdditionalPropertiesItem_Schema) isAdditionalPropertiesItem_Oneof() {} + +func (*AdditionalPropertiesItem_Boolean) isAdditionalPropertiesItem_Oneof() {} + +type Any struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Value *any.Any `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + Yaml string `protobuf:"bytes,2,opt,name=yaml,proto3" json:"yaml,omitempty"` } -func (m *Any) XXX_Merge(src proto.Message) { - xxx_messageInfo_Any.Merge(m, src) + +func (x *Any) Reset() { + *x = Any{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Any) XXX_Size() int { - return xxx_messageInfo_Any.Size(m) + +func (x *Any) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Any) XXX_DiscardUnknown() { - xxx_messageInfo_Any.DiscardUnknown(m) + +func (*Any) ProtoMessage() {} + +func (x *Any) ProtoReflect() protoreflect.Message { + mi := 
&file_openapiv2_OpenAPIv2_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Any proto.InternalMessageInfo +// Deprecated: Use Any.ProtoReflect.Descriptor instead. +func (*Any) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{1} +} -func (m *Any) GetValue() *any.Any { - if m != nil { - return m.Value +func (x *Any) GetValue() *any.Any { + if x != nil { + return x.Value } return nil } -func (m *Any) GetYaml() string { - if m != nil { - return m.Yaml +func (x *Any) GetYaml() string { + if x != nil { + return x.Yaml } return "" } type ApiKeySecurity struct { - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` - In string `protobuf:"bytes,3,opt,name=in,proto3" json:"in,omitempty"` - Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,5,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ApiKeySecurity) Reset() { *m = ApiKeySecurity{} } -func (m *ApiKeySecurity) String() string { return proto.CompactTextString(m) } -func (*ApiKeySecurity) ProtoMessage() {} -func (*ApiKeySecurity) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{2} -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *ApiKeySecurity) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ApiKeySecurity.Unmarshal(m, b) -} -func (m *ApiKeySecurity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ApiKeySecurity.Marshal(b, m, deterministic) + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + In string `protobuf:"bytes,3,opt,name=in,proto3" json:"in,omitempty"` + Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,5,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` } -func (m *ApiKeySecurity) XXX_Merge(src proto.Message) { - xxx_messageInfo_ApiKeySecurity.Merge(m, src) + +func (x *ApiKeySecurity) Reset() { + *x = ApiKeySecurity{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *ApiKeySecurity) XXX_Size() int { - return xxx_messageInfo_ApiKeySecurity.Size(m) + +func (x *ApiKeySecurity) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ApiKeySecurity) XXX_DiscardUnknown() { - xxx_messageInfo_ApiKeySecurity.DiscardUnknown(m) + +func (*ApiKeySecurity) ProtoMessage() {} + +func (x *ApiKeySecurity) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_ApiKeySecurity 
proto.InternalMessageInfo +// Deprecated: Use ApiKeySecurity.ProtoReflect.Descriptor instead. +func (*ApiKeySecurity) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{2} +} -func (m *ApiKeySecurity) GetType() string { - if m != nil { - return m.Type +func (x *ApiKeySecurity) GetType() string { + if x != nil { + return x.Type } return "" } -func (m *ApiKeySecurity) GetName() string { - if m != nil { - return m.Name +func (x *ApiKeySecurity) GetName() string { + if x != nil { + return x.Name } return "" } -func (m *ApiKeySecurity) GetIn() string { - if m != nil { - return m.In +func (x *ApiKeySecurity) GetIn() string { + if x != nil { + return x.In } return "" } -func (m *ApiKeySecurity) GetDescription() string { - if m != nil { - return m.Description +func (x *ApiKeySecurity) GetDescription() string { + if x != nil { + return x.Description } return "" } -func (m *ApiKeySecurity) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension +func (x *ApiKeySecurity) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension } return nil } type BasicAuthenticationSecurity struct { - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *BasicAuthenticationSecurity) Reset() { *m = BasicAuthenticationSecurity{} } -func (m *BasicAuthenticationSecurity) String() string { return proto.CompactTextString(m) } -func (*BasicAuthenticationSecurity) ProtoMessage() {} -func (*BasicAuthenticationSecurity) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{3} + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` } -func (m *BasicAuthenticationSecurity) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BasicAuthenticationSecurity.Unmarshal(m, b) -} -func (m *BasicAuthenticationSecurity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BasicAuthenticationSecurity.Marshal(b, m, deterministic) -} -func (m *BasicAuthenticationSecurity) XXX_Merge(src proto.Message) { - xxx_messageInfo_BasicAuthenticationSecurity.Merge(m, src) +func (x *BasicAuthenticationSecurity) Reset() { + *x = BasicAuthenticationSecurity{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *BasicAuthenticationSecurity) XXX_Size() int { - return xxx_messageInfo_BasicAuthenticationSecurity.Size(m) + +func (x *BasicAuthenticationSecurity) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *BasicAuthenticationSecurity) XXX_DiscardUnknown() { - xxx_messageInfo_BasicAuthenticationSecurity.DiscardUnknown(m) + +func (*BasicAuthenticationSecurity) ProtoMessage() {} + +func (x *BasicAuthenticationSecurity) ProtoReflect() 
protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_BasicAuthenticationSecurity proto.InternalMessageInfo +// Deprecated: Use BasicAuthenticationSecurity.ProtoReflect.Descriptor instead. +func (*BasicAuthenticationSecurity) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{3} +} -func (m *BasicAuthenticationSecurity) GetType() string { - if m != nil { - return m.Type +func (x *BasicAuthenticationSecurity) GetType() string { + if x != nil { + return x.Type } return "" } -func (m *BasicAuthenticationSecurity) GetDescription() string { - if m != nil { - return m.Description +func (x *BasicAuthenticationSecurity) GetDescription() string { + if x != nil { + return x.Description } return "" } -func (m *BasicAuthenticationSecurity) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension +func (x *BasicAuthenticationSecurity) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension } return nil } type BodyParameter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` // The name of the parameter. @@ -282,228 +331,260 @@ type BodyParameter struct { // Determines the location of the parameter. In string `protobuf:"bytes,3,opt,name=in,proto3" json:"in,omitempty"` // Determines whether or not this parameter is required or optional. 
- Required bool `protobuf:"varint,4,opt,name=required,proto3" json:"required,omitempty"` - Schema *Schema `protobuf:"bytes,5,opt,name=schema,proto3" json:"schema,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Required bool `protobuf:"varint,4,opt,name=required,proto3" json:"required,omitempty"` + Schema *Schema `protobuf:"bytes,5,opt,name=schema,proto3" json:"schema,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` } -func (m *BodyParameter) Reset() { *m = BodyParameter{} } -func (m *BodyParameter) String() string { return proto.CompactTextString(m) } -func (*BodyParameter) ProtoMessage() {} -func (*BodyParameter) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{4} +func (x *BodyParameter) Reset() { + *x = BodyParameter{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *BodyParameter) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BodyParameter.Unmarshal(m, b) +func (x *BodyParameter) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *BodyParameter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BodyParameter.Marshal(b, m, deterministic) -} -func (m *BodyParameter) XXX_Merge(src proto.Message) { - xxx_messageInfo_BodyParameter.Merge(m, src) -} -func (m *BodyParameter) XXX_Size() int { - return xxx_messageInfo_BodyParameter.Size(m) -} -func (m *BodyParameter) XXX_DiscardUnknown() { - xxx_messageInfo_BodyParameter.DiscardUnknown(m) + +func (*BodyParameter) ProtoMessage() {} + +func (x *BodyParameter) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_BodyParameter proto.InternalMessageInfo +// Deprecated: Use BodyParameter.ProtoReflect.Descriptor instead. 
+func (*BodyParameter) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{4} +} -func (m *BodyParameter) GetDescription() string { - if m != nil { - return m.Description +func (x *BodyParameter) GetDescription() string { + if x != nil { + return x.Description } return "" } -func (m *BodyParameter) GetName() string { - if m != nil { - return m.Name +func (x *BodyParameter) GetName() string { + if x != nil { + return x.Name } return "" } -func (m *BodyParameter) GetIn() string { - if m != nil { - return m.In +func (x *BodyParameter) GetIn() string { + if x != nil { + return x.In } return "" } -func (m *BodyParameter) GetRequired() bool { - if m != nil { - return m.Required +func (x *BodyParameter) GetRequired() bool { + if x != nil { + return x.Required } return false } -func (m *BodyParameter) GetSchema() *Schema { - if m != nil { - return m.Schema +func (x *BodyParameter) GetSchema() *Schema { + if x != nil { + return x.Schema } return nil } -func (m *BodyParameter) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension +func (x *BodyParameter) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension } return nil } // Contact information for the owners of the API. type Contact struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The identifying name of the contact person/organization. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // The URL pointing to the contact information. Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` // The email address of the contact person/organization. - Email string `protobuf:"bytes,3,opt,name=email,proto3" json:"email,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,4,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Email string `protobuf:"bytes,3,opt,name=email,proto3" json:"email,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,4,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` } -func (m *Contact) Reset() { *m = Contact{} } -func (m *Contact) String() string { return proto.CompactTextString(m) } -func (*Contact) ProtoMessage() {} -func (*Contact) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{5} +func (x *Contact) Reset() { + *x = Contact{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Contact) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Contact.Unmarshal(m, b) -} -func (m *Contact) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Contact.Marshal(b, m, deterministic) -} -func (m *Contact) XXX_Merge(src proto.Message) { - xxx_messageInfo_Contact.Merge(m, src) +func (x *Contact) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Contact) XXX_Size() int { - return xxx_messageInfo_Contact.Size(m) -} -func (m *Contact) XXX_DiscardUnknown() { - xxx_messageInfo_Contact.DiscardUnknown(m) + +func (*Contact) ProtoMessage() {} + +func (x *Contact) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Contact proto.InternalMessageInfo +// Deprecated: Use Contact.ProtoReflect.Descriptor instead. +func (*Contact) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{5} +} -func (m *Contact) GetName() string { - if m != nil { - return m.Name +func (x *Contact) GetName() string { + if x != nil { + return x.Name } return "" } -func (m *Contact) GetUrl() string { - if m != nil { - return m.Url +func (x *Contact) GetUrl() string { + if x != nil { + return x.Url } return "" } -func (m *Contact) GetEmail() string { - if m != nil { - return m.Email +func (x *Contact) GetEmail() string { + if x != nil { + return x.Email } return "" } -func (m *Contact) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension +func (x *Contact) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension } return nil } type Default struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` } -func (m *Default) Reset() { *m = Default{} } -func (m *Default) String() string { return proto.CompactTextString(m) } -func (*Default) ProtoMessage() {} -func (*Default) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{6} +func (x *Default) Reset() { + *x = Default{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Default) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Default.Unmarshal(m, b) -} -func (m *Default) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Default.Marshal(b, m, deterministic) -} -func (m *Default) XXX_Merge(src proto.Message) { - xxx_messageInfo_Default.Merge(m, src) -} -func (m *Default) XXX_Size() int { - return xxx_messageInfo_Default.Size(m) +func (x *Default) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Default) XXX_DiscardUnknown() { - xxx_messageInfo_Default.DiscardUnknown(m) + +func (*Default) ProtoMessage() {} + +func (x *Default) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Default proto.InternalMessageInfo +// Deprecated: Use Default.ProtoReflect.Descriptor instead. +func (*Default) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{6} +} -func (m *Default) GetAdditionalProperties() []*NamedAny { - if m != nil { - return m.AdditionalProperties +func (x *Default) GetAdditionalProperties() []*NamedAny { + if x != nil { + return x.AdditionalProperties } return nil } // One or more JSON objects describing the schemas being consumed and produced by the API. 
type Definitions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + AdditionalProperties []*NamedSchema `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` } -func (m *Definitions) Reset() { *m = Definitions{} } -func (m *Definitions) String() string { return proto.CompactTextString(m) } -func (*Definitions) ProtoMessage() {} -func (*Definitions) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{7} +func (x *Definitions) Reset() { + *x = Definitions{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Definitions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Definitions.Unmarshal(m, b) -} -func (m *Definitions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Definitions.Marshal(b, m, deterministic) -} -func (m *Definitions) XXX_Merge(src proto.Message) { - xxx_messageInfo_Definitions.Merge(m, src) -} -func (m *Definitions) XXX_Size() int { - return xxx_messageInfo_Definitions.Size(m) +func (x *Definitions) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Definitions) XXX_DiscardUnknown() { - xxx_messageInfo_Definitions.DiscardUnknown(m) + +func (*Definitions) ProtoMessage() {} + +func (x *Definitions) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Definitions proto.InternalMessageInfo +// Deprecated: Use Definitions.ProtoReflect.Descriptor instead. +func (*Definitions) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{7} +} -func (m *Definitions) GetAdditionalProperties() []*NamedSchema { - if m != nil { - return m.AdditionalProperties +func (x *Definitions) GetAdditionalProperties() []*NamedSchema { + if x != nil { + return x.AdditionalProperties } return nil } type Document struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The Swagger version of this document. Swagger string `protobuf:"bytes,1,opt,name=swagger,proto3" json:"swagger,omitempty"` Info *Info `protobuf:"bytes,2,opt,name=info,proto3" json:"info,omitempty"` @@ -516,366 +597,398 @@ type Document struct { // A list of MIME types accepted by the API. Consumes []string `protobuf:"bytes,6,rep,name=consumes,proto3" json:"consumes,omitempty"` // A list of MIME types the API can produce. 
- Produces []string `protobuf:"bytes,7,rep,name=produces,proto3" json:"produces,omitempty"` - Paths *Paths `protobuf:"bytes,8,opt,name=paths,proto3" json:"paths,omitempty"` - Definitions *Definitions `protobuf:"bytes,9,opt,name=definitions,proto3" json:"definitions,omitempty"` - Parameters *ParameterDefinitions `protobuf:"bytes,10,opt,name=parameters,proto3" json:"parameters,omitempty"` - Responses *ResponseDefinitions `protobuf:"bytes,11,opt,name=responses,proto3" json:"responses,omitempty"` - Security []*SecurityRequirement `protobuf:"bytes,12,rep,name=security,proto3" json:"security,omitempty"` - SecurityDefinitions *SecurityDefinitions `protobuf:"bytes,13,opt,name=security_definitions,json=securityDefinitions,proto3" json:"security_definitions,omitempty"` - Tags []*Tag `protobuf:"bytes,14,rep,name=tags,proto3" json:"tags,omitempty"` - ExternalDocs *ExternalDocs `protobuf:"bytes,15,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,16,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Document) Reset() { *m = Document{} } -func (m *Document) String() string { return proto.CompactTextString(m) } -func (*Document) ProtoMessage() {} -func (*Document) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{8} + Produces []string `protobuf:"bytes,7,rep,name=produces,proto3" json:"produces,omitempty"` + Paths *Paths `protobuf:"bytes,8,opt,name=paths,proto3" json:"paths,omitempty"` + Definitions *Definitions `protobuf:"bytes,9,opt,name=definitions,proto3" json:"definitions,omitempty"` + Parameters *ParameterDefinitions `protobuf:"bytes,10,opt,name=parameters,proto3" json:"parameters,omitempty"` + Responses *ResponseDefinitions `protobuf:"bytes,11,opt,name=responses,proto3" json:"responses,omitempty"` + Security []*SecurityRequirement `protobuf:"bytes,12,rep,name=security,proto3" json:"security,omitempty"` + SecurityDefinitions *SecurityDefinitions `protobuf:"bytes,13,opt,name=security_definitions,json=securityDefinitions,proto3" json:"security_definitions,omitempty"` + Tags []*Tag `protobuf:"bytes,14,rep,name=tags,proto3" json:"tags,omitempty"` + ExternalDocs *ExternalDocs `protobuf:"bytes,15,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,16,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` } -func (m *Document) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Document.Unmarshal(m, b) -} -func (m *Document) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Document.Marshal(b, m, deterministic) -} -func (m *Document) XXX_Merge(src proto.Message) { - xxx_messageInfo_Document.Merge(m, src) +func (x *Document) Reset() { + *x = Document{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Document) XXX_Size() int { - return xxx_messageInfo_Document.Size(m) + +func (x *Document) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Document) XXX_DiscardUnknown() { - xxx_messageInfo_Document.DiscardUnknown(m) + +func (*Document) ProtoMessage() {} + +func (x *Document) ProtoReflect() protoreflect.Message { + mi := 
&file_openapiv2_OpenAPIv2_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Document proto.InternalMessageInfo +// Deprecated: Use Document.ProtoReflect.Descriptor instead. +func (*Document) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{8} +} -func (m *Document) GetSwagger() string { - if m != nil { - return m.Swagger +func (x *Document) GetSwagger() string { + if x != nil { + return x.Swagger } return "" } -func (m *Document) GetInfo() *Info { - if m != nil { - return m.Info +func (x *Document) GetInfo() *Info { + if x != nil { + return x.Info } return nil } -func (m *Document) GetHost() string { - if m != nil { - return m.Host +func (x *Document) GetHost() string { + if x != nil { + return x.Host } return "" } -func (m *Document) GetBasePath() string { - if m != nil { - return m.BasePath +func (x *Document) GetBasePath() string { + if x != nil { + return x.BasePath } return "" } -func (m *Document) GetSchemes() []string { - if m != nil { - return m.Schemes +func (x *Document) GetSchemes() []string { + if x != nil { + return x.Schemes } return nil } -func (m *Document) GetConsumes() []string { - if m != nil { - return m.Consumes +func (x *Document) GetConsumes() []string { + if x != nil { + return x.Consumes } return nil } -func (m *Document) GetProduces() []string { - if m != nil { - return m.Produces +func (x *Document) GetProduces() []string { + if x != nil { + return x.Produces } return nil } -func (m *Document) GetPaths() *Paths { - if m != nil { - return m.Paths +func (x *Document) GetPaths() *Paths { + if x != nil { + return x.Paths } return nil } -func (m *Document) GetDefinitions() *Definitions { - if m != nil { - return m.Definitions +func (x *Document) GetDefinitions() *Definitions { + if x != nil { + return x.Definitions } return nil } -func (m *Document) GetParameters() *ParameterDefinitions { - if m != nil { - return m.Parameters +func (x *Document) GetParameters() *ParameterDefinitions { + if x != nil { + return x.Parameters } return nil } -func (m *Document) GetResponses() *ResponseDefinitions { - if m != nil { - return m.Responses +func (x *Document) GetResponses() *ResponseDefinitions { + if x != nil { + return x.Responses } return nil } -func (m *Document) GetSecurity() []*SecurityRequirement { - if m != nil { - return m.Security +func (x *Document) GetSecurity() []*SecurityRequirement { + if x != nil { + return x.Security } return nil } -func (m *Document) GetSecurityDefinitions() *SecurityDefinitions { - if m != nil { - return m.SecurityDefinitions +func (x *Document) GetSecurityDefinitions() *SecurityDefinitions { + if x != nil { + return x.SecurityDefinitions } return nil } -func (m *Document) GetTags() []*Tag { - if m != nil { - return m.Tags +func (x *Document) GetTags() []*Tag { + if x != nil { + return x.Tags } return nil } -func (m *Document) GetExternalDocs() *ExternalDocs { - if m != nil { - return m.ExternalDocs +func (x *Document) GetExternalDocs() *ExternalDocs { + if x != nil { + return x.ExternalDocs } return nil } -func (m *Document) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension +func (x *Document) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension } return nil } type Examples struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + 
unknownFields protoimpl.UnknownFields + AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` } -func (m *Examples) Reset() { *m = Examples{} } -func (m *Examples) String() string { return proto.CompactTextString(m) } -func (*Examples) ProtoMessage() {} -func (*Examples) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{9} +func (x *Examples) Reset() { + *x = Examples{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Examples) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Examples.Unmarshal(m, b) +func (x *Examples) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Examples) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Examples.Marshal(b, m, deterministic) -} -func (m *Examples) XXX_Merge(src proto.Message) { - xxx_messageInfo_Examples.Merge(m, src) -} -func (m *Examples) XXX_Size() int { - return xxx_messageInfo_Examples.Size(m) -} -func (m *Examples) XXX_DiscardUnknown() { - xxx_messageInfo_Examples.DiscardUnknown(m) + +func (*Examples) ProtoMessage() {} + +func (x *Examples) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Examples proto.InternalMessageInfo +// Deprecated: Use Examples.ProtoReflect.Descriptor instead. 
+func (*Examples) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{9} +} -func (m *Examples) GetAdditionalProperties() []*NamedAny { - if m != nil { - return m.AdditionalProperties +func (x *Examples) GetAdditionalProperties() []*NamedAny { + if x != nil { + return x.AdditionalProperties } return nil } // information about external documentation type ExternalDocs struct { - Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` - Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *ExternalDocs) Reset() { *m = ExternalDocs{} } -func (m *ExternalDocs) String() string { return proto.CompactTextString(m) } -func (*ExternalDocs) ProtoMessage() {} -func (*ExternalDocs) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{10} + Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` + Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` } -func (m *ExternalDocs) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ExternalDocs.Unmarshal(m, b) -} -func (m *ExternalDocs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ExternalDocs.Marshal(b, m, deterministic) -} -func (m *ExternalDocs) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExternalDocs.Merge(m, src) +func (x *ExternalDocs) Reset() { + *x = ExternalDocs{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *ExternalDocs) XXX_Size() int { - return xxx_messageInfo_ExternalDocs.Size(m) + +func (x *ExternalDocs) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ExternalDocs) XXX_DiscardUnknown() { - xxx_messageInfo_ExternalDocs.DiscardUnknown(m) + +func (*ExternalDocs) ProtoMessage() {} + +func (x *ExternalDocs) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_ExternalDocs proto.InternalMessageInfo +// Deprecated: Use ExternalDocs.ProtoReflect.Descriptor instead. 
+func (*ExternalDocs) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{10} +} -func (m *ExternalDocs) GetDescription() string { - if m != nil { - return m.Description +func (x *ExternalDocs) GetDescription() string { + if x != nil { + return x.Description } return "" } -func (m *ExternalDocs) GetUrl() string { - if m != nil { - return m.Url +func (x *ExternalDocs) GetUrl() string { + if x != nil { + return x.Url } return "" } -func (m *ExternalDocs) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension +func (x *ExternalDocs) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension } return nil } // A deterministic version of a JSON Schema object. type FileSchema struct { - Format string `protobuf:"bytes,1,opt,name=format,proto3" json:"format,omitempty"` - Title string `protobuf:"bytes,2,opt,name=title,proto3" json:"title,omitempty"` - Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` - Default *Any `protobuf:"bytes,4,opt,name=default,proto3" json:"default,omitempty"` - Required []string `protobuf:"bytes,5,rep,name=required,proto3" json:"required,omitempty"` - Type string `protobuf:"bytes,6,opt,name=type,proto3" json:"type,omitempty"` - ReadOnly bool `protobuf:"varint,7,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"` - ExternalDocs *ExternalDocs `protobuf:"bytes,8,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` - Example *Any `protobuf:"bytes,9,opt,name=example,proto3" json:"example,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,10,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *FileSchema) Reset() { *m = FileSchema{} } -func (m *FileSchema) String() string { return proto.CompactTextString(m) } -func (*FileSchema) ProtoMessage() {} -func (*FileSchema) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{11} -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *FileSchema) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FileSchema.Unmarshal(m, b) -} -func (m *FileSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FileSchema.Marshal(b, m, deterministic) + Format string `protobuf:"bytes,1,opt,name=format,proto3" json:"format,omitempty"` + Title string `protobuf:"bytes,2,opt,name=title,proto3" json:"title,omitempty"` + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + Default *Any `protobuf:"bytes,4,opt,name=default,proto3" json:"default,omitempty"` + Required []string `protobuf:"bytes,5,rep,name=required,proto3" json:"required,omitempty"` + Type string `protobuf:"bytes,6,opt,name=type,proto3" json:"type,omitempty"` + ReadOnly bool `protobuf:"varint,7,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"` + ExternalDocs *ExternalDocs `protobuf:"bytes,8,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` + Example *Any `protobuf:"bytes,9,opt,name=example,proto3" json:"example,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,10,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` } -func (m *FileSchema) XXX_Merge(src proto.Message) { - 
xxx_messageInfo_FileSchema.Merge(m, src) + +func (x *FileSchema) Reset() { + *x = FileSchema{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *FileSchema) XXX_Size() int { - return xxx_messageInfo_FileSchema.Size(m) + +func (x *FileSchema) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *FileSchema) XXX_DiscardUnknown() { - xxx_messageInfo_FileSchema.DiscardUnknown(m) + +func (*FileSchema) ProtoMessage() {} + +func (x *FileSchema) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_FileSchema proto.InternalMessageInfo +// Deprecated: Use FileSchema.ProtoReflect.Descriptor instead. +func (*FileSchema) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{11} +} -func (m *FileSchema) GetFormat() string { - if m != nil { - return m.Format +func (x *FileSchema) GetFormat() string { + if x != nil { + return x.Format } return "" } -func (m *FileSchema) GetTitle() string { - if m != nil { - return m.Title +func (x *FileSchema) GetTitle() string { + if x != nil { + return x.Title } return "" } -func (m *FileSchema) GetDescription() string { - if m != nil { - return m.Description +func (x *FileSchema) GetDescription() string { + if x != nil { + return x.Description } return "" } -func (m *FileSchema) GetDefault() *Any { - if m != nil { - return m.Default +func (x *FileSchema) GetDefault() *Any { + if x != nil { + return x.Default } return nil } -func (m *FileSchema) GetRequired() []string { - if m != nil { - return m.Required +func (x *FileSchema) GetRequired() []string { + if x != nil { + return x.Required } return nil } -func (m *FileSchema) GetType() string { - if m != nil { - return m.Type +func (x *FileSchema) GetType() string { + if x != nil { + return x.Type } return "" } -func (m *FileSchema) GetReadOnly() bool { - if m != nil { - return m.ReadOnly +func (x *FileSchema) GetReadOnly() bool { + if x != nil { + return x.ReadOnly } return false } -func (m *FileSchema) GetExternalDocs() *ExternalDocs { - if m != nil { - return m.ExternalDocs +func (x *FileSchema) GetExternalDocs() *ExternalDocs { + if x != nil { + return x.ExternalDocs } return nil } -func (m *FileSchema) GetExample() *Any { - if m != nil { - return m.Example +func (x *FileSchema) GetExample() *Any { + if x != nil { + return x.Example } return nil } -func (m *FileSchema) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension +func (x *FileSchema) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension } return nil } type FormDataParameterSubSchema struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Determines whether or not this parameter is required or optional. Required bool `protobuf:"varint,1,opt,name=required,proto3" json:"required,omitempty"` // Determines the location of the parameter. @@ -885,400 +998,416 @@ type FormDataParameterSubSchema struct { // The name of the parameter. Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` // allows sending a parameter by name only or with an empty value. 
- AllowEmptyValue bool `protobuf:"varint,5,opt,name=allow_empty_value,json=allowEmptyValue,proto3" json:"allow_empty_value,omitempty"` - Type string `protobuf:"bytes,6,opt,name=type,proto3" json:"type,omitempty"` - Format string `protobuf:"bytes,7,opt,name=format,proto3" json:"format,omitempty"` - Items *PrimitivesItems `protobuf:"bytes,8,opt,name=items,proto3" json:"items,omitempty"` - CollectionFormat string `protobuf:"bytes,9,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"` - Default *Any `protobuf:"bytes,10,opt,name=default,proto3" json:"default,omitempty"` - Maximum float64 `protobuf:"fixed64,11,opt,name=maximum,proto3" json:"maximum,omitempty"` - ExclusiveMaximum bool `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` - Minimum float64 `protobuf:"fixed64,13,opt,name=minimum,proto3" json:"minimum,omitempty"` - ExclusiveMinimum bool `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` - MaxLength int64 `protobuf:"varint,15,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` - MinLength int64 `protobuf:"varint,16,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` - Pattern string `protobuf:"bytes,17,opt,name=pattern,proto3" json:"pattern,omitempty"` - MaxItems int64 `protobuf:"varint,18,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` - MinItems int64 `protobuf:"varint,19,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` - UniqueItems bool `protobuf:"varint,20,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` - Enum []*Any `protobuf:"bytes,21,rep,name=enum,proto3" json:"enum,omitempty"` - MultipleOf float64 `protobuf:"fixed64,22,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,23,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *FormDataParameterSubSchema) Reset() { *m = FormDataParameterSubSchema{} } -func (m *FormDataParameterSubSchema) String() string { return proto.CompactTextString(m) } -func (*FormDataParameterSubSchema) ProtoMessage() {} + AllowEmptyValue bool `protobuf:"varint,5,opt,name=allow_empty_value,json=allowEmptyValue,proto3" json:"allow_empty_value,omitempty"` + Type string `protobuf:"bytes,6,opt,name=type,proto3" json:"type,omitempty"` + Format string `protobuf:"bytes,7,opt,name=format,proto3" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,8,opt,name=items,proto3" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,9,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,10,opt,name=default,proto3" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,11,opt,name=maximum,proto3" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,13,opt,name=minimum,proto3" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,15,opt,name=max_length,json=maxLength,proto3" 
json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,16,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,17,opt,name=pattern,proto3" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,18,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,19,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,20,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,21,rep,name=enum,proto3" json:"enum,omitempty"` + MultipleOf float64 `protobuf:"fixed64,22,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,23,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` +} + +func (x *FormDataParameterSubSchema) Reset() { + *x = FormDataParameterSubSchema{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FormDataParameterSubSchema) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FormDataParameterSubSchema) ProtoMessage() {} + +func (x *FormDataParameterSubSchema) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FormDataParameterSubSchema.ProtoReflect.Descriptor instead. func (*FormDataParameterSubSchema) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{12} -} - -func (m *FormDataParameterSubSchema) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FormDataParameterSubSchema.Unmarshal(m, b) -} -func (m *FormDataParameterSubSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FormDataParameterSubSchema.Marshal(b, m, deterministic) + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{12} } -func (m *FormDataParameterSubSchema) XXX_Merge(src proto.Message) { - xxx_messageInfo_FormDataParameterSubSchema.Merge(m, src) -} -func (m *FormDataParameterSubSchema) XXX_Size() int { - return xxx_messageInfo_FormDataParameterSubSchema.Size(m) -} -func (m *FormDataParameterSubSchema) XXX_DiscardUnknown() { - xxx_messageInfo_FormDataParameterSubSchema.DiscardUnknown(m) -} - -var xxx_messageInfo_FormDataParameterSubSchema proto.InternalMessageInfo -func (m *FormDataParameterSubSchema) GetRequired() bool { - if m != nil { - return m.Required +func (x *FormDataParameterSubSchema) GetRequired() bool { + if x != nil { + return x.Required } return false } -func (m *FormDataParameterSubSchema) GetIn() string { - if m != nil { - return m.In +func (x *FormDataParameterSubSchema) GetIn() string { + if x != nil { + return x.In } return "" } -func (m *FormDataParameterSubSchema) GetDescription() string { - if m != nil { - return m.Description +func (x *FormDataParameterSubSchema) GetDescription() string { + if x != nil { + return x.Description } return "" } -func (m *FormDataParameterSubSchema) GetName() string { - if m != nil { - return m.Name +func (x *FormDataParameterSubSchema) GetName() string { + if x != nil { + return x.Name } return "" } -func (m *FormDataParameterSubSchema) 
GetAllowEmptyValue() bool { - if m != nil { - return m.AllowEmptyValue +func (x *FormDataParameterSubSchema) GetAllowEmptyValue() bool { + if x != nil { + return x.AllowEmptyValue } return false } -func (m *FormDataParameterSubSchema) GetType() string { - if m != nil { - return m.Type +func (x *FormDataParameterSubSchema) GetType() string { + if x != nil { + return x.Type } return "" } -func (m *FormDataParameterSubSchema) GetFormat() string { - if m != nil { - return m.Format +func (x *FormDataParameterSubSchema) GetFormat() string { + if x != nil { + return x.Format } return "" } -func (m *FormDataParameterSubSchema) GetItems() *PrimitivesItems { - if m != nil { - return m.Items +func (x *FormDataParameterSubSchema) GetItems() *PrimitivesItems { + if x != nil { + return x.Items } return nil } -func (m *FormDataParameterSubSchema) GetCollectionFormat() string { - if m != nil { - return m.CollectionFormat +func (x *FormDataParameterSubSchema) GetCollectionFormat() string { + if x != nil { + return x.CollectionFormat } return "" } -func (m *FormDataParameterSubSchema) GetDefault() *Any { - if m != nil { - return m.Default +func (x *FormDataParameterSubSchema) GetDefault() *Any { + if x != nil { + return x.Default } return nil } -func (m *FormDataParameterSubSchema) GetMaximum() float64 { - if m != nil { - return m.Maximum +func (x *FormDataParameterSubSchema) GetMaximum() float64 { + if x != nil { + return x.Maximum } return 0 } -func (m *FormDataParameterSubSchema) GetExclusiveMaximum() bool { - if m != nil { - return m.ExclusiveMaximum +func (x *FormDataParameterSubSchema) GetExclusiveMaximum() bool { + if x != nil { + return x.ExclusiveMaximum } return false } -func (m *FormDataParameterSubSchema) GetMinimum() float64 { - if m != nil { - return m.Minimum +func (x *FormDataParameterSubSchema) GetMinimum() float64 { + if x != nil { + return x.Minimum } return 0 } -func (m *FormDataParameterSubSchema) GetExclusiveMinimum() bool { - if m != nil { - return m.ExclusiveMinimum +func (x *FormDataParameterSubSchema) GetExclusiveMinimum() bool { + if x != nil { + return x.ExclusiveMinimum } return false } -func (m *FormDataParameterSubSchema) GetMaxLength() int64 { - if m != nil { - return m.MaxLength +func (x *FormDataParameterSubSchema) GetMaxLength() int64 { + if x != nil { + return x.MaxLength } return 0 } -func (m *FormDataParameterSubSchema) GetMinLength() int64 { - if m != nil { - return m.MinLength +func (x *FormDataParameterSubSchema) GetMinLength() int64 { + if x != nil { + return x.MinLength } return 0 } -func (m *FormDataParameterSubSchema) GetPattern() string { - if m != nil { - return m.Pattern +func (x *FormDataParameterSubSchema) GetPattern() string { + if x != nil { + return x.Pattern } return "" } -func (m *FormDataParameterSubSchema) GetMaxItems() int64 { - if m != nil { - return m.MaxItems +func (x *FormDataParameterSubSchema) GetMaxItems() int64 { + if x != nil { + return x.MaxItems } return 0 } -func (m *FormDataParameterSubSchema) GetMinItems() int64 { - if m != nil { - return m.MinItems +func (x *FormDataParameterSubSchema) GetMinItems() int64 { + if x != nil { + return x.MinItems } return 0 } -func (m *FormDataParameterSubSchema) GetUniqueItems() bool { - if m != nil { - return m.UniqueItems +func (x *FormDataParameterSubSchema) GetUniqueItems() bool { + if x != nil { + return x.UniqueItems } return false } -func (m *FormDataParameterSubSchema) GetEnum() []*Any { - if m != nil { - return m.Enum +func (x *FormDataParameterSubSchema) GetEnum() []*Any { + if x != nil { + 
return x.Enum } return nil } -func (m *FormDataParameterSubSchema) GetMultipleOf() float64 { - if m != nil { - return m.MultipleOf +func (x *FormDataParameterSubSchema) GetMultipleOf() float64 { + if x != nil { + return x.MultipleOf } return 0 } -func (m *FormDataParameterSubSchema) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension +func (x *FormDataParameterSubSchema) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension } return nil } type Header struct { - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - Format string `protobuf:"bytes,2,opt,name=format,proto3" json:"format,omitempty"` - Items *PrimitivesItems `protobuf:"bytes,3,opt,name=items,proto3" json:"items,omitempty"` - CollectionFormat string `protobuf:"bytes,4,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"` - Default *Any `protobuf:"bytes,5,opt,name=default,proto3" json:"default,omitempty"` - Maximum float64 `protobuf:"fixed64,6,opt,name=maximum,proto3" json:"maximum,omitempty"` - ExclusiveMaximum bool `protobuf:"varint,7,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` - Minimum float64 `protobuf:"fixed64,8,opt,name=minimum,proto3" json:"minimum,omitempty"` - ExclusiveMinimum bool `protobuf:"varint,9,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` - MaxLength int64 `protobuf:"varint,10,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` - MinLength int64 `protobuf:"varint,11,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` - Pattern string `protobuf:"bytes,12,opt,name=pattern,proto3" json:"pattern,omitempty"` - MaxItems int64 `protobuf:"varint,13,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` - MinItems int64 `protobuf:"varint,14,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` - UniqueItems bool `protobuf:"varint,15,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` - Enum []*Any `protobuf:"bytes,16,rep,name=enum,proto3" json:"enum,omitempty"` - MultipleOf float64 `protobuf:"fixed64,17,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` - Description string `protobuf:"bytes,18,opt,name=description,proto3" json:"description,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,19,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Header) Reset() { *m = Header{} } -func (m *Header) String() string { return proto.CompactTextString(m) } -func (*Header) ProtoMessage() {} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Format string `protobuf:"bytes,2,opt,name=format,proto3" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,3,opt,name=items,proto3" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,4,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,5,opt,name=default,proto3" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,6,opt,name=maximum,proto3" json:"maximum,omitempty"` + ExclusiveMaximum bool 
`protobuf:"varint,7,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,8,opt,name=minimum,proto3" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,9,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,10,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,11,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,12,opt,name=pattern,proto3" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,13,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,14,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,15,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,16,rep,name=enum,proto3" json:"enum,omitempty"` + MultipleOf float64 `protobuf:"fixed64,17,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` + Description string `protobuf:"bytes,18,opt,name=description,proto3" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,19,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` +} + +func (x *Header) Reset() { + *x = Header{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Header) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Header) ProtoMessage() {} + +func (x *Header) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Header.ProtoReflect.Descriptor instead. 
func (*Header) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{13} -} - -func (m *Header) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Header.Unmarshal(m, b) -} -func (m *Header) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Header.Marshal(b, m, deterministic) -} -func (m *Header) XXX_Merge(src proto.Message) { - xxx_messageInfo_Header.Merge(m, src) -} -func (m *Header) XXX_Size() int { - return xxx_messageInfo_Header.Size(m) + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{13} } -func (m *Header) XXX_DiscardUnknown() { - xxx_messageInfo_Header.DiscardUnknown(m) -} - -var xxx_messageInfo_Header proto.InternalMessageInfo -func (m *Header) GetType() string { - if m != nil { - return m.Type +func (x *Header) GetType() string { + if x != nil { + return x.Type } return "" } -func (m *Header) GetFormat() string { - if m != nil { - return m.Format +func (x *Header) GetFormat() string { + if x != nil { + return x.Format } return "" } -func (m *Header) GetItems() *PrimitivesItems { - if m != nil { - return m.Items +func (x *Header) GetItems() *PrimitivesItems { + if x != nil { + return x.Items } return nil } -func (m *Header) GetCollectionFormat() string { - if m != nil { - return m.CollectionFormat +func (x *Header) GetCollectionFormat() string { + if x != nil { + return x.CollectionFormat } return "" } -func (m *Header) GetDefault() *Any { - if m != nil { - return m.Default +func (x *Header) GetDefault() *Any { + if x != nil { + return x.Default } return nil } -func (m *Header) GetMaximum() float64 { - if m != nil { - return m.Maximum +func (x *Header) GetMaximum() float64 { + if x != nil { + return x.Maximum } return 0 } -func (m *Header) GetExclusiveMaximum() bool { - if m != nil { - return m.ExclusiveMaximum +func (x *Header) GetExclusiveMaximum() bool { + if x != nil { + return x.ExclusiveMaximum } return false } -func (m *Header) GetMinimum() float64 { - if m != nil { - return m.Minimum +func (x *Header) GetMinimum() float64 { + if x != nil { + return x.Minimum } return 0 } -func (m *Header) GetExclusiveMinimum() bool { - if m != nil { - return m.ExclusiveMinimum +func (x *Header) GetExclusiveMinimum() bool { + if x != nil { + return x.ExclusiveMinimum } return false } -func (m *Header) GetMaxLength() int64 { - if m != nil { - return m.MaxLength +func (x *Header) GetMaxLength() int64 { + if x != nil { + return x.MaxLength } return 0 } -func (m *Header) GetMinLength() int64 { - if m != nil { - return m.MinLength +func (x *Header) GetMinLength() int64 { + if x != nil { + return x.MinLength } return 0 } -func (m *Header) GetPattern() string { - if m != nil { - return m.Pattern +func (x *Header) GetPattern() string { + if x != nil { + return x.Pattern } return "" } -func (m *Header) GetMaxItems() int64 { - if m != nil { - return m.MaxItems +func (x *Header) GetMaxItems() int64 { + if x != nil { + return x.MaxItems } return 0 } -func (m *Header) GetMinItems() int64 { - if m != nil { - return m.MinItems +func (x *Header) GetMinItems() int64 { + if x != nil { + return x.MinItems } return 0 } -func (m *Header) GetUniqueItems() bool { - if m != nil { - return m.UniqueItems +func (x *Header) GetUniqueItems() bool { + if x != nil { + return x.UniqueItems } return false } -func (m *Header) GetEnum() []*Any { - if m != nil { - return m.Enum +func (x *Header) GetEnum() []*Any { + if x != nil { + return x.Enum } return nil } -func (m *Header) GetMultipleOf() float64 { - if m != nil { - return m.MultipleOf +func (x 
*Header) GetMultipleOf() float64 { + if x != nil { + return x.MultipleOf } return 0 } -func (m *Header) GetDescription() string { - if m != nil { - return m.Description +func (x *Header) GetDescription() string { + if x != nil { + return x.Description } return "" } -func (m *Header) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension +func (x *Header) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension } return nil } type HeaderParameterSubSchema struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Determines whether or not this parameter is required or optional. Required bool `protobuf:"varint,1,opt,name=required,proto3" json:"required,omitempty"` // Determines the location of the parameter. @@ -1286,250 +1415,266 @@ type HeaderParameterSubSchema struct { // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` // The name of the parameter. - Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` - Type string `protobuf:"bytes,5,opt,name=type,proto3" json:"type,omitempty"` - Format string `protobuf:"bytes,6,opt,name=format,proto3" json:"format,omitempty"` - Items *PrimitivesItems `protobuf:"bytes,7,opt,name=items,proto3" json:"items,omitempty"` - CollectionFormat string `protobuf:"bytes,8,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"` - Default *Any `protobuf:"bytes,9,opt,name=default,proto3" json:"default,omitempty"` - Maximum float64 `protobuf:"fixed64,10,opt,name=maximum,proto3" json:"maximum,omitempty"` - ExclusiveMaximum bool `protobuf:"varint,11,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` - Minimum float64 `protobuf:"fixed64,12,opt,name=minimum,proto3" json:"minimum,omitempty"` - ExclusiveMinimum bool `protobuf:"varint,13,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` - MaxLength int64 `protobuf:"varint,14,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` - MinLength int64 `protobuf:"varint,15,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` - Pattern string `protobuf:"bytes,16,opt,name=pattern,proto3" json:"pattern,omitempty"` - MaxItems int64 `protobuf:"varint,17,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` - MinItems int64 `protobuf:"varint,18,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` - UniqueItems bool `protobuf:"varint,19,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` - Enum []*Any `protobuf:"bytes,20,rep,name=enum,proto3" json:"enum,omitempty"` - MultipleOf float64 `protobuf:"fixed64,21,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,22,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *HeaderParameterSubSchema) Reset() { *m = HeaderParameterSubSchema{} } -func (m *HeaderParameterSubSchema) String() string { return proto.CompactTextString(m) } -func (*HeaderParameterSubSchema) ProtoMessage() {} + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + Type string 
`protobuf:"bytes,5,opt,name=type,proto3" json:"type,omitempty"` + Format string `protobuf:"bytes,6,opt,name=format,proto3" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,7,opt,name=items,proto3" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,8,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,9,opt,name=default,proto3" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,10,opt,name=maximum,proto3" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,11,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,12,opt,name=minimum,proto3" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,13,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,14,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,15,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,16,opt,name=pattern,proto3" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,17,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,18,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,19,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,20,rep,name=enum,proto3" json:"enum,omitempty"` + MultipleOf float64 `protobuf:"fixed64,21,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,22,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` +} + +func (x *HeaderParameterSubSchema) Reset() { + *x = HeaderParameterSubSchema{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HeaderParameterSubSchema) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HeaderParameterSubSchema) ProtoMessage() {} + +func (x *HeaderParameterSubSchema) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HeaderParameterSubSchema.ProtoReflect.Descriptor instead. 
func (*HeaderParameterSubSchema) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{14} -} - -func (m *HeaderParameterSubSchema) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_HeaderParameterSubSchema.Unmarshal(m, b) -} -func (m *HeaderParameterSubSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_HeaderParameterSubSchema.Marshal(b, m, deterministic) -} -func (m *HeaderParameterSubSchema) XXX_Merge(src proto.Message) { - xxx_messageInfo_HeaderParameterSubSchema.Merge(m, src) -} -func (m *HeaderParameterSubSchema) XXX_Size() int { - return xxx_messageInfo_HeaderParameterSubSchema.Size(m) -} -func (m *HeaderParameterSubSchema) XXX_DiscardUnknown() { - xxx_messageInfo_HeaderParameterSubSchema.DiscardUnknown(m) + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{14} } -var xxx_messageInfo_HeaderParameterSubSchema proto.InternalMessageInfo - -func (m *HeaderParameterSubSchema) GetRequired() bool { - if m != nil { - return m.Required +func (x *HeaderParameterSubSchema) GetRequired() bool { + if x != nil { + return x.Required } return false } -func (m *HeaderParameterSubSchema) GetIn() string { - if m != nil { - return m.In +func (x *HeaderParameterSubSchema) GetIn() string { + if x != nil { + return x.In } return "" } -func (m *HeaderParameterSubSchema) GetDescription() string { - if m != nil { - return m.Description +func (x *HeaderParameterSubSchema) GetDescription() string { + if x != nil { + return x.Description } return "" } -func (m *HeaderParameterSubSchema) GetName() string { - if m != nil { - return m.Name +func (x *HeaderParameterSubSchema) GetName() string { + if x != nil { + return x.Name } return "" } -func (m *HeaderParameterSubSchema) GetType() string { - if m != nil { - return m.Type +func (x *HeaderParameterSubSchema) GetType() string { + if x != nil { + return x.Type } return "" } -func (m *HeaderParameterSubSchema) GetFormat() string { - if m != nil { - return m.Format +func (x *HeaderParameterSubSchema) GetFormat() string { + if x != nil { + return x.Format } return "" } -func (m *HeaderParameterSubSchema) GetItems() *PrimitivesItems { - if m != nil { - return m.Items +func (x *HeaderParameterSubSchema) GetItems() *PrimitivesItems { + if x != nil { + return x.Items } return nil } -func (m *HeaderParameterSubSchema) GetCollectionFormat() string { - if m != nil { - return m.CollectionFormat +func (x *HeaderParameterSubSchema) GetCollectionFormat() string { + if x != nil { + return x.CollectionFormat } return "" } -func (m *HeaderParameterSubSchema) GetDefault() *Any { - if m != nil { - return m.Default +func (x *HeaderParameterSubSchema) GetDefault() *Any { + if x != nil { + return x.Default } return nil } -func (m *HeaderParameterSubSchema) GetMaximum() float64 { - if m != nil { - return m.Maximum +func (x *HeaderParameterSubSchema) GetMaximum() float64 { + if x != nil { + return x.Maximum } return 0 } -func (m *HeaderParameterSubSchema) GetExclusiveMaximum() bool { - if m != nil { - return m.ExclusiveMaximum +func (x *HeaderParameterSubSchema) GetExclusiveMaximum() bool { + if x != nil { + return x.ExclusiveMaximum } return false } -func (m *HeaderParameterSubSchema) GetMinimum() float64 { - if m != nil { - return m.Minimum +func (x *HeaderParameterSubSchema) GetMinimum() float64 { + if x != nil { + return x.Minimum } return 0 } -func (m *HeaderParameterSubSchema) GetExclusiveMinimum() bool { - if m != nil { - return m.ExclusiveMinimum +func (x *HeaderParameterSubSchema) 
GetExclusiveMinimum() bool { + if x != nil { + return x.ExclusiveMinimum } return false } -func (m *HeaderParameterSubSchema) GetMaxLength() int64 { - if m != nil { - return m.MaxLength +func (x *HeaderParameterSubSchema) GetMaxLength() int64 { + if x != nil { + return x.MaxLength } return 0 } -func (m *HeaderParameterSubSchema) GetMinLength() int64 { - if m != nil { - return m.MinLength +func (x *HeaderParameterSubSchema) GetMinLength() int64 { + if x != nil { + return x.MinLength } return 0 } -func (m *HeaderParameterSubSchema) GetPattern() string { - if m != nil { - return m.Pattern +func (x *HeaderParameterSubSchema) GetPattern() string { + if x != nil { + return x.Pattern } return "" } -func (m *HeaderParameterSubSchema) GetMaxItems() int64 { - if m != nil { - return m.MaxItems +func (x *HeaderParameterSubSchema) GetMaxItems() int64 { + if x != nil { + return x.MaxItems } return 0 } -func (m *HeaderParameterSubSchema) GetMinItems() int64 { - if m != nil { - return m.MinItems +func (x *HeaderParameterSubSchema) GetMinItems() int64 { + if x != nil { + return x.MinItems } return 0 } -func (m *HeaderParameterSubSchema) GetUniqueItems() bool { - if m != nil { - return m.UniqueItems +func (x *HeaderParameterSubSchema) GetUniqueItems() bool { + if x != nil { + return x.UniqueItems } return false } -func (m *HeaderParameterSubSchema) GetEnum() []*Any { - if m != nil { - return m.Enum +func (x *HeaderParameterSubSchema) GetEnum() []*Any { + if x != nil { + return x.Enum } return nil } -func (m *HeaderParameterSubSchema) GetMultipleOf() float64 { - if m != nil { - return m.MultipleOf +func (x *HeaderParameterSubSchema) GetMultipleOf() float64 { + if x != nil { + return x.MultipleOf } return 0 } -func (m *HeaderParameterSubSchema) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension +func (x *HeaderParameterSubSchema) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension } return nil } type Headers struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + AdditionalProperties []*NamedHeader `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` } -func (m *Headers) Reset() { *m = Headers{} } -func (m *Headers) String() string { return proto.CompactTextString(m) } -func (*Headers) ProtoMessage() {} -func (*Headers) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{15} +func (x *Headers) Reset() { + *x = Headers{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Headers) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Headers.Unmarshal(m, b) -} -func (m *Headers) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Headers.Marshal(b, m, deterministic) -} -func (m *Headers) XXX_Merge(src proto.Message) { - xxx_messageInfo_Headers.Merge(m, src) +func (x *Headers) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Headers) XXX_Size() int { - return xxx_messageInfo_Headers.Size(m) -} -func (m *Headers) XXX_DiscardUnknown() { - xxx_messageInfo_Headers.DiscardUnknown(m) + +func (*Headers) ProtoMessage() {} + +func (x *Headers) ProtoReflect() protoreflect.Message { + mi := 
&file_openapiv2_OpenAPIv2_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Headers proto.InternalMessageInfo +// Deprecated: Use Headers.ProtoReflect.Descriptor instead. +func (*Headers) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{15} +} -func (m *Headers) GetAdditionalProperties() []*NamedHeader { - if m != nil { - return m.AdditionalProperties +func (x *Headers) GetAdditionalProperties() []*NamedHeader { + if x != nil { + return x.AdditionalProperties } return nil } // General information about the API. type Info struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // A unique and precise title of the API. Title string `protobuf:"bytes,1,opt,name=title,proto3" json:"title,omitempty"` // A semantic version number of the API. @@ -1537,797 +1682,885 @@ type Info struct { // A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed. Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` // The terms of service for the API. - TermsOfService string `protobuf:"bytes,4,opt,name=terms_of_service,json=termsOfService,proto3" json:"terms_of_service,omitempty"` - Contact *Contact `protobuf:"bytes,5,opt,name=contact,proto3" json:"contact,omitempty"` - License *License `protobuf:"bytes,6,opt,name=license,proto3" json:"license,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,7,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Info) Reset() { *m = Info{} } -func (m *Info) String() string { return proto.CompactTextString(m) } -func (*Info) ProtoMessage() {} -func (*Info) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{16} + TermsOfService string `protobuf:"bytes,4,opt,name=terms_of_service,json=termsOfService,proto3" json:"terms_of_service,omitempty"` + Contact *Contact `protobuf:"bytes,5,opt,name=contact,proto3" json:"contact,omitempty"` + License *License `protobuf:"bytes,6,opt,name=license,proto3" json:"license,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,7,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` } -func (m *Info) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Info.Unmarshal(m, b) -} -func (m *Info) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Info.Marshal(b, m, deterministic) -} -func (m *Info) XXX_Merge(src proto.Message) { - xxx_messageInfo_Info.Merge(m, src) +func (x *Info) Reset() { + *x = Info{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Info) XXX_Size() int { - return xxx_messageInfo_Info.Size(m) + +func (x *Info) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Info) XXX_DiscardUnknown() { - xxx_messageInfo_Info.DiscardUnknown(m) + +func (*Info) ProtoMessage() {} + +func (x *Info) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Info proto.InternalMessageInfo +// Deprecated: Use Info.ProtoReflect.Descriptor instead. +func (*Info) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{16} +} -func (m *Info) GetTitle() string { - if m != nil { - return m.Title +func (x *Info) GetTitle() string { + if x != nil { + return x.Title } return "" } -func (m *Info) GetVersion() string { - if m != nil { - return m.Version +func (x *Info) GetVersion() string { + if x != nil { + return x.Version } return "" } -func (m *Info) GetDescription() string { - if m != nil { - return m.Description +func (x *Info) GetDescription() string { + if x != nil { + return x.Description } return "" } -func (m *Info) GetTermsOfService() string { - if m != nil { - return m.TermsOfService +func (x *Info) GetTermsOfService() string { + if x != nil { + return x.TermsOfService } return "" } -func (m *Info) GetContact() *Contact { - if m != nil { - return m.Contact +func (x *Info) GetContact() *Contact { + if x != nil { + return x.Contact } return nil } -func (m *Info) GetLicense() *License { - if m != nil { - return m.License +func (x *Info) GetLicense() *License { + if x != nil { + return x.License } return nil } -func (m *Info) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension +func (x *Info) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension } return nil } type ItemsItem struct { - Schema []*Schema `protobuf:"bytes,1,rep,name=schema,proto3" json:"schema,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *ItemsItem) Reset() { *m = ItemsItem{} } -func (m *ItemsItem) String() string { return proto.CompactTextString(m) } -func (*ItemsItem) ProtoMessage() {} -func (*ItemsItem) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{17} + Schema []*Schema `protobuf:"bytes,1,rep,name=schema,proto3" json:"schema,omitempty"` } -func (m *ItemsItem) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ItemsItem.Unmarshal(m, b) -} -func (m *ItemsItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ItemsItem.Marshal(b, m, deterministic) -} -func (m *ItemsItem) XXX_Merge(src proto.Message) { - xxx_messageInfo_ItemsItem.Merge(m, src) +func (x *ItemsItem) Reset() { + *x = ItemsItem{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *ItemsItem) XXX_Size() int { - return xxx_messageInfo_ItemsItem.Size(m) + +func (x *ItemsItem) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ItemsItem) XXX_DiscardUnknown() { - xxx_messageInfo_ItemsItem.DiscardUnknown(m) + +func (*ItemsItem) ProtoMessage() {} + +func (x *ItemsItem) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_ItemsItem proto.InternalMessageInfo +// Deprecated: Use 
ItemsItem.ProtoReflect.Descriptor instead. +func (*ItemsItem) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{17} +} -func (m *ItemsItem) GetSchema() []*Schema { - if m != nil { - return m.Schema +func (x *ItemsItem) GetSchema() []*Schema { + if x != nil { + return x.Schema } return nil } type JsonReference struct { - XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref,proto3" json:"_ref,omitempty"` - Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *JsonReference) Reset() { *m = JsonReference{} } -func (m *JsonReference) String() string { return proto.CompactTextString(m) } -func (*JsonReference) ProtoMessage() {} -func (*JsonReference) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{18} + XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref,proto3" json:"_ref,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` } -func (m *JsonReference) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_JsonReference.Unmarshal(m, b) -} -func (m *JsonReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_JsonReference.Marshal(b, m, deterministic) -} -func (m *JsonReference) XXX_Merge(src proto.Message) { - xxx_messageInfo_JsonReference.Merge(m, src) +func (x *JsonReference) Reset() { + *x = JsonReference{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *JsonReference) XXX_Size() int { - return xxx_messageInfo_JsonReference.Size(m) + +func (x *JsonReference) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *JsonReference) XXX_DiscardUnknown() { - xxx_messageInfo_JsonReference.DiscardUnknown(m) + +func (*JsonReference) ProtoMessage() {} + +func (x *JsonReference) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_JsonReference proto.InternalMessageInfo +// Deprecated: Use JsonReference.ProtoReflect.Descriptor instead. +func (*JsonReference) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{18} +} -func (m *JsonReference) GetXRef() string { - if m != nil { - return m.XRef +func (x *JsonReference) GetXRef() string { + if x != nil { + return x.XRef } return "" } -func (m *JsonReference) GetDescription() string { - if m != nil { - return m.Description +func (x *JsonReference) GetDescription() string { + if x != nil { + return x.Description } return "" } type License struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The name of the license type. It's encouraged to use an OSI compatible license. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // The URL pointing to the license. 
- Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` } -func (m *License) Reset() { *m = License{} } -func (m *License) String() string { return proto.CompactTextString(m) } -func (*License) ProtoMessage() {} -func (*License) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{19} +func (x *License) Reset() { + *x = License{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *License) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_License.Unmarshal(m, b) -} -func (m *License) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_License.Marshal(b, m, deterministic) -} -func (m *License) XXX_Merge(src proto.Message) { - xxx_messageInfo_License.Merge(m, src) +func (x *License) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *License) XXX_Size() int { - return xxx_messageInfo_License.Size(m) -} -func (m *License) XXX_DiscardUnknown() { - xxx_messageInfo_License.DiscardUnknown(m) + +func (*License) ProtoMessage() {} + +func (x *License) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_License proto.InternalMessageInfo +// Deprecated: Use License.ProtoReflect.Descriptor instead. +func (*License) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{19} +} -func (m *License) GetName() string { - if m != nil { - return m.Name +func (x *License) GetName() string { + if x != nil { + return x.Name } return "" } -func (m *License) GetUrl() string { - if m != nil { - return m.Url +func (x *License) GetUrl() string { + if x != nil { + return x.Url } return "" } -func (m *License) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension +func (x *License) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension } return nil } // Automatically-generated message used to represent maps of Any as ordered (name,value) pairs. 
type NamedAny struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Map key Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Mapped value - Value *Any `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Value *Any `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` } -func (m *NamedAny) Reset() { *m = NamedAny{} } -func (m *NamedAny) String() string { return proto.CompactTextString(m) } -func (*NamedAny) ProtoMessage() {} -func (*NamedAny) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{20} +func (x *NamedAny) Reset() { + *x = NamedAny{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *NamedAny) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NamedAny.Unmarshal(m, b) +func (x *NamedAny) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *NamedAny) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NamedAny.Marshal(b, m, deterministic) -} -func (m *NamedAny) XXX_Merge(src proto.Message) { - xxx_messageInfo_NamedAny.Merge(m, src) -} -func (m *NamedAny) XXX_Size() int { - return xxx_messageInfo_NamedAny.Size(m) -} -func (m *NamedAny) XXX_DiscardUnknown() { - xxx_messageInfo_NamedAny.DiscardUnknown(m) + +func (*NamedAny) ProtoMessage() {} + +func (x *NamedAny) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_NamedAny proto.InternalMessageInfo +// Deprecated: Use NamedAny.ProtoReflect.Descriptor instead. +func (*NamedAny) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{20} +} -func (m *NamedAny) GetName() string { - if m != nil { - return m.Name +func (x *NamedAny) GetName() string { + if x != nil { + return x.Name } return "" } -func (m *NamedAny) GetValue() *Any { - if m != nil { - return m.Value +func (x *NamedAny) GetValue() *Any { + if x != nil { + return x.Value } return nil } // Automatically-generated message used to represent maps of Header as ordered (name,value) pairs. 
type NamedHeader struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Map key Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Mapped value - Value *Header `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Value *Header `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` } -func (m *NamedHeader) Reset() { *m = NamedHeader{} } -func (m *NamedHeader) String() string { return proto.CompactTextString(m) } -func (*NamedHeader) ProtoMessage() {} -func (*NamedHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{21} +func (x *NamedHeader) Reset() { + *x = NamedHeader{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *NamedHeader) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NamedHeader.Unmarshal(m, b) -} -func (m *NamedHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NamedHeader.Marshal(b, m, deterministic) -} -func (m *NamedHeader) XXX_Merge(src proto.Message) { - xxx_messageInfo_NamedHeader.Merge(m, src) -} -func (m *NamedHeader) XXX_Size() int { - return xxx_messageInfo_NamedHeader.Size(m) +func (x *NamedHeader) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *NamedHeader) XXX_DiscardUnknown() { - xxx_messageInfo_NamedHeader.DiscardUnknown(m) + +func (*NamedHeader) ProtoMessage() {} + +func (x *NamedHeader) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_NamedHeader proto.InternalMessageInfo +// Deprecated: Use NamedHeader.ProtoReflect.Descriptor instead. +func (*NamedHeader) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{21} +} -func (m *NamedHeader) GetName() string { - if m != nil { - return m.Name +func (x *NamedHeader) GetName() string { + if x != nil { + return x.Name } return "" } -func (m *NamedHeader) GetValue() *Header { - if m != nil { - return m.Value +func (x *NamedHeader) GetValue() *Header { + if x != nil { + return x.Value } return nil } // Automatically-generated message used to represent maps of Parameter as ordered (name,value) pairs. 
type NamedParameter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Map key Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Mapped value - Value *Parameter `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Value *Parameter `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` } -func (m *NamedParameter) Reset() { *m = NamedParameter{} } -func (m *NamedParameter) String() string { return proto.CompactTextString(m) } -func (*NamedParameter) ProtoMessage() {} -func (*NamedParameter) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{22} +func (x *NamedParameter) Reset() { + *x = NamedParameter{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *NamedParameter) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NamedParameter.Unmarshal(m, b) -} -func (m *NamedParameter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NamedParameter.Marshal(b, m, deterministic) -} -func (m *NamedParameter) XXX_Merge(src proto.Message) { - xxx_messageInfo_NamedParameter.Merge(m, src) +func (x *NamedParameter) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *NamedParameter) XXX_Size() int { - return xxx_messageInfo_NamedParameter.Size(m) -} -func (m *NamedParameter) XXX_DiscardUnknown() { - xxx_messageInfo_NamedParameter.DiscardUnknown(m) + +func (*NamedParameter) ProtoMessage() {} + +func (x *NamedParameter) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_NamedParameter proto.InternalMessageInfo +// Deprecated: Use NamedParameter.ProtoReflect.Descriptor instead. +func (*NamedParameter) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{22} +} -func (m *NamedParameter) GetName() string { - if m != nil { - return m.Name +func (x *NamedParameter) GetName() string { + if x != nil { + return x.Name } return "" } -func (m *NamedParameter) GetValue() *Parameter { - if m != nil { - return m.Value +func (x *NamedParameter) GetValue() *Parameter { + if x != nil { + return x.Value } return nil } // Automatically-generated message used to represent maps of PathItem as ordered (name,value) pairs. 
type NamedPathItem struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Map key Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Mapped value - Value *PathItem `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Value *PathItem `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` } -func (m *NamedPathItem) Reset() { *m = NamedPathItem{} } -func (m *NamedPathItem) String() string { return proto.CompactTextString(m) } -func (*NamedPathItem) ProtoMessage() {} -func (*NamedPathItem) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{23} +func (x *NamedPathItem) Reset() { + *x = NamedPathItem{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *NamedPathItem) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NamedPathItem.Unmarshal(m, b) -} -func (m *NamedPathItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NamedPathItem.Marshal(b, m, deterministic) +func (x *NamedPathItem) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *NamedPathItem) XXX_Merge(src proto.Message) { - xxx_messageInfo_NamedPathItem.Merge(m, src) -} -func (m *NamedPathItem) XXX_Size() int { - return xxx_messageInfo_NamedPathItem.Size(m) -} -func (m *NamedPathItem) XXX_DiscardUnknown() { - xxx_messageInfo_NamedPathItem.DiscardUnknown(m) + +func (*NamedPathItem) ProtoMessage() {} + +func (x *NamedPathItem) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_NamedPathItem proto.InternalMessageInfo +// Deprecated: Use NamedPathItem.ProtoReflect.Descriptor instead. +func (*NamedPathItem) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{23} +} -func (m *NamedPathItem) GetName() string { - if m != nil { - return m.Name +func (x *NamedPathItem) GetName() string { + if x != nil { + return x.Name } return "" } -func (m *NamedPathItem) GetValue() *PathItem { - if m != nil { - return m.Value +func (x *NamedPathItem) GetValue() *PathItem { + if x != nil { + return x.Value } return nil } // Automatically-generated message used to represent maps of Response as ordered (name,value) pairs. 
type NamedResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Map key Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Mapped value - Value *Response `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Value *Response `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` } -func (m *NamedResponse) Reset() { *m = NamedResponse{} } -func (m *NamedResponse) String() string { return proto.CompactTextString(m) } -func (*NamedResponse) ProtoMessage() {} -func (*NamedResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{24} +func (x *NamedResponse) Reset() { + *x = NamedResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *NamedResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NamedResponse.Unmarshal(m, b) +func (x *NamedResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *NamedResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NamedResponse.Marshal(b, m, deterministic) -} -func (m *NamedResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_NamedResponse.Merge(m, src) -} -func (m *NamedResponse) XXX_Size() int { - return xxx_messageInfo_NamedResponse.Size(m) -} -func (m *NamedResponse) XXX_DiscardUnknown() { - xxx_messageInfo_NamedResponse.DiscardUnknown(m) + +func (*NamedResponse) ProtoMessage() {} + +func (x *NamedResponse) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_NamedResponse proto.InternalMessageInfo +// Deprecated: Use NamedResponse.ProtoReflect.Descriptor instead. +func (*NamedResponse) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{24} +} -func (m *NamedResponse) GetName() string { - if m != nil { - return m.Name +func (x *NamedResponse) GetName() string { + if x != nil { + return x.Name } return "" } -func (m *NamedResponse) GetValue() *Response { - if m != nil { - return m.Value +func (x *NamedResponse) GetValue() *Response { + if x != nil { + return x.Value } return nil } // Automatically-generated message used to represent maps of ResponseValue as ordered (name,value) pairs. 
type NamedResponseValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Map key Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Mapped value - Value *ResponseValue `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Value *ResponseValue `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` } -func (m *NamedResponseValue) Reset() { *m = NamedResponseValue{} } -func (m *NamedResponseValue) String() string { return proto.CompactTextString(m) } -func (*NamedResponseValue) ProtoMessage() {} -func (*NamedResponseValue) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{25} +func (x *NamedResponseValue) Reset() { + *x = NamedResponseValue{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *NamedResponseValue) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NamedResponseValue.Unmarshal(m, b) -} -func (m *NamedResponseValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NamedResponseValue.Marshal(b, m, deterministic) -} -func (m *NamedResponseValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_NamedResponseValue.Merge(m, src) -} -func (m *NamedResponseValue) XXX_Size() int { - return xxx_messageInfo_NamedResponseValue.Size(m) +func (x *NamedResponseValue) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *NamedResponseValue) XXX_DiscardUnknown() { - xxx_messageInfo_NamedResponseValue.DiscardUnknown(m) + +func (*NamedResponseValue) ProtoMessage() {} + +func (x *NamedResponseValue) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_NamedResponseValue proto.InternalMessageInfo +// Deprecated: Use NamedResponseValue.ProtoReflect.Descriptor instead. +func (*NamedResponseValue) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{25} +} -func (m *NamedResponseValue) GetName() string { - if m != nil { - return m.Name +func (x *NamedResponseValue) GetName() string { + if x != nil { + return x.Name } return "" } -func (m *NamedResponseValue) GetValue() *ResponseValue { - if m != nil { - return m.Value +func (x *NamedResponseValue) GetValue() *ResponseValue { + if x != nil { + return x.Value } return nil } // Automatically-generated message used to represent maps of Schema as ordered (name,value) pairs. 
type NamedSchema struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Map key Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Mapped value - Value *Schema `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Value *Schema `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` } -func (m *NamedSchema) Reset() { *m = NamedSchema{} } -func (m *NamedSchema) String() string { return proto.CompactTextString(m) } -func (*NamedSchema) ProtoMessage() {} -func (*NamedSchema) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{26} +func (x *NamedSchema) Reset() { + *x = NamedSchema{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *NamedSchema) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NamedSchema.Unmarshal(m, b) -} -func (m *NamedSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NamedSchema.Marshal(b, m, deterministic) -} -func (m *NamedSchema) XXX_Merge(src proto.Message) { - xxx_messageInfo_NamedSchema.Merge(m, src) +func (x *NamedSchema) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *NamedSchema) XXX_Size() int { - return xxx_messageInfo_NamedSchema.Size(m) -} -func (m *NamedSchema) XXX_DiscardUnknown() { - xxx_messageInfo_NamedSchema.DiscardUnknown(m) + +func (*NamedSchema) ProtoMessage() {} + +func (x *NamedSchema) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[26] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_NamedSchema proto.InternalMessageInfo +// Deprecated: Use NamedSchema.ProtoReflect.Descriptor instead. +func (*NamedSchema) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{26} +} -func (m *NamedSchema) GetName() string { - if m != nil { - return m.Name +func (x *NamedSchema) GetName() string { + if x != nil { + return x.Name } return "" } -func (m *NamedSchema) GetValue() *Schema { - if m != nil { - return m.Value +func (x *NamedSchema) GetValue() *Schema { + if x != nil { + return x.Value } return nil } // Automatically-generated message used to represent maps of SecurityDefinitionsItem as ordered (name,value) pairs. 
type NamedSecurityDefinitionsItem struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Map key Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Mapped value - Value *SecurityDefinitionsItem `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Value *SecurityDefinitionsItem `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` } -func (m *NamedSecurityDefinitionsItem) Reset() { *m = NamedSecurityDefinitionsItem{} } -func (m *NamedSecurityDefinitionsItem) String() string { return proto.CompactTextString(m) } -func (*NamedSecurityDefinitionsItem) ProtoMessage() {} -func (*NamedSecurityDefinitionsItem) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{27} +func (x *NamedSecurityDefinitionsItem) Reset() { + *x = NamedSecurityDefinitionsItem{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *NamedSecurityDefinitionsItem) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NamedSecurityDefinitionsItem.Unmarshal(m, b) -} -func (m *NamedSecurityDefinitionsItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NamedSecurityDefinitionsItem.Marshal(b, m, deterministic) +func (x *NamedSecurityDefinitionsItem) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *NamedSecurityDefinitionsItem) XXX_Merge(src proto.Message) { - xxx_messageInfo_NamedSecurityDefinitionsItem.Merge(m, src) -} -func (m *NamedSecurityDefinitionsItem) XXX_Size() int { - return xxx_messageInfo_NamedSecurityDefinitionsItem.Size(m) -} -func (m *NamedSecurityDefinitionsItem) XXX_DiscardUnknown() { - xxx_messageInfo_NamedSecurityDefinitionsItem.DiscardUnknown(m) + +func (*NamedSecurityDefinitionsItem) ProtoMessage() {} + +func (x *NamedSecurityDefinitionsItem) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[27] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_NamedSecurityDefinitionsItem proto.InternalMessageInfo +// Deprecated: Use NamedSecurityDefinitionsItem.ProtoReflect.Descriptor instead. +func (*NamedSecurityDefinitionsItem) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{27} +} -func (m *NamedSecurityDefinitionsItem) GetName() string { - if m != nil { - return m.Name +func (x *NamedSecurityDefinitionsItem) GetName() string { + if x != nil { + return x.Name } return "" } -func (m *NamedSecurityDefinitionsItem) GetValue() *SecurityDefinitionsItem { - if m != nil { - return m.Value +func (x *NamedSecurityDefinitionsItem) GetValue() *SecurityDefinitionsItem { + if x != nil { + return x.Value } return nil } // Automatically-generated message used to represent maps of string as ordered (name,value) pairs. 
type NamedString struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Map key Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Mapped value - Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` } -func (m *NamedString) Reset() { *m = NamedString{} } -func (m *NamedString) String() string { return proto.CompactTextString(m) } -func (*NamedString) ProtoMessage() {} -func (*NamedString) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{28} +func (x *NamedString) Reset() { + *x = NamedString{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *NamedString) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NamedString.Unmarshal(m, b) +func (x *NamedString) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *NamedString) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NamedString.Marshal(b, m, deterministic) -} -func (m *NamedString) XXX_Merge(src proto.Message) { - xxx_messageInfo_NamedString.Merge(m, src) -} -func (m *NamedString) XXX_Size() int { - return xxx_messageInfo_NamedString.Size(m) -} -func (m *NamedString) XXX_DiscardUnknown() { - xxx_messageInfo_NamedString.DiscardUnknown(m) + +func (*NamedString) ProtoMessage() {} + +func (x *NamedString) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[28] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_NamedString proto.InternalMessageInfo +// Deprecated: Use NamedString.ProtoReflect.Descriptor instead. +func (*NamedString) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{28} +} -func (m *NamedString) GetName() string { - if m != nil { - return m.Name +func (x *NamedString) GetName() string { + if x != nil { + return x.Name } return "" } -func (m *NamedString) GetValue() string { - if m != nil { - return m.Value +func (x *NamedString) GetValue() string { + if x != nil { + return x.Value } return "" } // Automatically-generated message used to represent maps of StringArray as ordered (name,value) pairs. 
type NamedStringArray struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Map key Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Mapped value - Value *StringArray `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Value *StringArray `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` } -func (m *NamedStringArray) Reset() { *m = NamedStringArray{} } -func (m *NamedStringArray) String() string { return proto.CompactTextString(m) } -func (*NamedStringArray) ProtoMessage() {} -func (*NamedStringArray) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{29} +func (x *NamedStringArray) Reset() { + *x = NamedStringArray{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *NamedStringArray) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NamedStringArray.Unmarshal(m, b) -} -func (m *NamedStringArray) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NamedStringArray.Marshal(b, m, deterministic) -} -func (m *NamedStringArray) XXX_Merge(src proto.Message) { - xxx_messageInfo_NamedStringArray.Merge(m, src) -} -func (m *NamedStringArray) XXX_Size() int { - return xxx_messageInfo_NamedStringArray.Size(m) +func (x *NamedStringArray) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *NamedStringArray) XXX_DiscardUnknown() { - xxx_messageInfo_NamedStringArray.DiscardUnknown(m) + +func (*NamedStringArray) ProtoMessage() {} + +func (x *NamedStringArray) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[29] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_NamedStringArray proto.InternalMessageInfo +// Deprecated: Use NamedStringArray.ProtoReflect.Descriptor instead. 
+func (*NamedStringArray) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{29} +} -func (m *NamedStringArray) GetName() string { - if m != nil { - return m.Name +func (x *NamedStringArray) GetName() string { + if x != nil { + return x.Name } return "" } -func (m *NamedStringArray) GetValue() *StringArray { - if m != nil { - return m.Value +func (x *NamedStringArray) GetValue() *StringArray { + if x != nil { + return x.Value } return nil } type NonBodyParameter struct { - // Types that are valid to be assigned to Oneof: + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Oneof: // *NonBodyParameter_HeaderParameterSubSchema // *NonBodyParameter_FormDataParameterSubSchema // *NonBodyParameter_QueryParameterSubSchema // *NonBodyParameter_PathParameterSubSchema - Oneof isNonBodyParameter_Oneof `protobuf_oneof:"oneof"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Oneof isNonBodyParameter_Oneof `protobuf_oneof:"oneof"` } -func (m *NonBodyParameter) Reset() { *m = NonBodyParameter{} } -func (m *NonBodyParameter) String() string { return proto.CompactTextString(m) } -func (*NonBodyParameter) ProtoMessage() {} -func (*NonBodyParameter) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{30} +func (x *NonBodyParameter) Reset() { + *x = NonBodyParameter{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *NonBodyParameter) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NonBodyParameter.Unmarshal(m, b) -} -func (m *NonBodyParameter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NonBodyParameter.Marshal(b, m, deterministic) -} -func (m *NonBodyParameter) XXX_Merge(src proto.Message) { - xxx_messageInfo_NonBodyParameter.Merge(m, src) -} -func (m *NonBodyParameter) XXX_Size() int { - return xxx_messageInfo_NonBodyParameter.Size(m) -} -func (m *NonBodyParameter) XXX_DiscardUnknown() { - xxx_messageInfo_NonBodyParameter.DiscardUnknown(m) +func (x *NonBodyParameter) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_NonBodyParameter proto.InternalMessageInfo +func (*NonBodyParameter) ProtoMessage() {} -type isNonBodyParameter_Oneof interface { - isNonBodyParameter_Oneof() +func (x *NonBodyParameter) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[30] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -type NonBodyParameter_HeaderParameterSubSchema struct { - HeaderParameterSubSchema *HeaderParameterSubSchema `protobuf:"bytes,1,opt,name=header_parameter_sub_schema,json=headerParameterSubSchema,proto3,oneof"` +// Deprecated: Use NonBodyParameter.ProtoReflect.Descriptor instead. 
+func (*NonBodyParameter) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{30} } -type NonBodyParameter_FormDataParameterSubSchema struct { - FormDataParameterSubSchema *FormDataParameterSubSchema `protobuf:"bytes,2,opt,name=form_data_parameter_sub_schema,json=formDataParameterSubSchema,proto3,oneof"` -} - -type NonBodyParameter_QueryParameterSubSchema struct { - QueryParameterSubSchema *QueryParameterSubSchema `protobuf:"bytes,3,opt,name=query_parameter_sub_schema,json=queryParameterSubSchema,proto3,oneof"` -} - -type NonBodyParameter_PathParameterSubSchema struct { - PathParameterSubSchema *PathParameterSubSchema `protobuf:"bytes,4,opt,name=path_parameter_sub_schema,json=pathParameterSubSchema,proto3,oneof"` -} - -func (*NonBodyParameter_HeaderParameterSubSchema) isNonBodyParameter_Oneof() {} - -func (*NonBodyParameter_FormDataParameterSubSchema) isNonBodyParameter_Oneof() {} - -func (*NonBodyParameter_QueryParameterSubSchema) isNonBodyParameter_Oneof() {} - -func (*NonBodyParameter_PathParameterSubSchema) isNonBodyParameter_Oneof() {} - func (m *NonBodyParameter) GetOneof() isNonBodyParameter_Oneof { if m != nil { return m.Oneof @@ -2335,408 +2568,470 @@ func (m *NonBodyParameter) GetOneof() isNonBodyParameter_Oneof { return nil } -func (m *NonBodyParameter) GetHeaderParameterSubSchema() *HeaderParameterSubSchema { - if x, ok := m.GetOneof().(*NonBodyParameter_HeaderParameterSubSchema); ok { +func (x *NonBodyParameter) GetHeaderParameterSubSchema() *HeaderParameterSubSchema { + if x, ok := x.GetOneof().(*NonBodyParameter_HeaderParameterSubSchema); ok { return x.HeaderParameterSubSchema } return nil } -func (m *NonBodyParameter) GetFormDataParameterSubSchema() *FormDataParameterSubSchema { - if x, ok := m.GetOneof().(*NonBodyParameter_FormDataParameterSubSchema); ok { +func (x *NonBodyParameter) GetFormDataParameterSubSchema() *FormDataParameterSubSchema { + if x, ok := x.GetOneof().(*NonBodyParameter_FormDataParameterSubSchema); ok { return x.FormDataParameterSubSchema } return nil } -func (m *NonBodyParameter) GetQueryParameterSubSchema() *QueryParameterSubSchema { - if x, ok := m.GetOneof().(*NonBodyParameter_QueryParameterSubSchema); ok { +func (x *NonBodyParameter) GetQueryParameterSubSchema() *QueryParameterSubSchema { + if x, ok := x.GetOneof().(*NonBodyParameter_QueryParameterSubSchema); ok { return x.QueryParameterSubSchema } return nil } -func (m *NonBodyParameter) GetPathParameterSubSchema() *PathParameterSubSchema { - if x, ok := m.GetOneof().(*NonBodyParameter_PathParameterSubSchema); ok { +func (x *NonBodyParameter) GetPathParameterSubSchema() *PathParameterSubSchema { + if x, ok := x.GetOneof().(*NonBodyParameter_PathParameterSubSchema); ok { return x.PathParameterSubSchema } return nil } -// XXX_OneofWrappers is for the internal use of the proto package. 
-func (*NonBodyParameter) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*NonBodyParameter_HeaderParameterSubSchema)(nil), - (*NonBodyParameter_FormDataParameterSubSchema)(nil), - (*NonBodyParameter_QueryParameterSubSchema)(nil), - (*NonBodyParameter_PathParameterSubSchema)(nil), - } +type isNonBodyParameter_Oneof interface { + isNonBodyParameter_Oneof() } -type Oauth2AccessCodeSecurity struct { - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - Flow string `protobuf:"bytes,2,opt,name=flow,proto3" json:"flow,omitempty"` - Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes,proto3" json:"scopes,omitempty"` - AuthorizationUrl string `protobuf:"bytes,4,opt,name=authorization_url,json=authorizationUrl,proto3" json:"authorization_url,omitempty"` - TokenUrl string `protobuf:"bytes,5,opt,name=token_url,json=tokenUrl,proto3" json:"token_url,omitempty"` - Description string `protobuf:"bytes,6,opt,name=description,proto3" json:"description,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,7,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Oauth2AccessCodeSecurity) Reset() { *m = Oauth2AccessCodeSecurity{} } -func (m *Oauth2AccessCodeSecurity) String() string { return proto.CompactTextString(m) } -func (*Oauth2AccessCodeSecurity) ProtoMessage() {} -func (*Oauth2AccessCodeSecurity) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{31} +type NonBodyParameter_HeaderParameterSubSchema struct { + HeaderParameterSubSchema *HeaderParameterSubSchema `protobuf:"bytes,1,opt,name=header_parameter_sub_schema,json=headerParameterSubSchema,proto3,oneof"` +} + +type NonBodyParameter_FormDataParameterSubSchema struct { + FormDataParameterSubSchema *FormDataParameterSubSchema `protobuf:"bytes,2,opt,name=form_data_parameter_sub_schema,json=formDataParameterSubSchema,proto3,oneof"` +} + +type NonBodyParameter_QueryParameterSubSchema struct { + QueryParameterSubSchema *QueryParameterSubSchema `protobuf:"bytes,3,opt,name=query_parameter_sub_schema,json=queryParameterSubSchema,proto3,oneof"` } -func (m *Oauth2AccessCodeSecurity) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Oauth2AccessCodeSecurity.Unmarshal(m, b) +type NonBodyParameter_PathParameterSubSchema struct { + PathParameterSubSchema *PathParameterSubSchema `protobuf:"bytes,4,opt,name=path_parameter_sub_schema,json=pathParameterSubSchema,proto3,oneof"` } -func (m *Oauth2AccessCodeSecurity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Oauth2AccessCodeSecurity.Marshal(b, m, deterministic) + +func (*NonBodyParameter_HeaderParameterSubSchema) isNonBodyParameter_Oneof() {} + +func (*NonBodyParameter_FormDataParameterSubSchema) isNonBodyParameter_Oneof() {} + +func (*NonBodyParameter_QueryParameterSubSchema) isNonBodyParameter_Oneof() {} + +func (*NonBodyParameter_PathParameterSubSchema) isNonBodyParameter_Oneof() {} + +type Oauth2AccessCodeSecurity struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Flow string `protobuf:"bytes,2,opt,name=flow,proto3" json:"flow,omitempty"` + Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes,proto3" json:"scopes,omitempty"` + AuthorizationUrl string 
`protobuf:"bytes,4,opt,name=authorization_url,json=authorizationUrl,proto3" json:"authorization_url,omitempty"` + TokenUrl string `protobuf:"bytes,5,opt,name=token_url,json=tokenUrl,proto3" json:"token_url,omitempty"` + Description string `protobuf:"bytes,6,opt,name=description,proto3" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,7,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` } -func (m *Oauth2AccessCodeSecurity) XXX_Merge(src proto.Message) { - xxx_messageInfo_Oauth2AccessCodeSecurity.Merge(m, src) + +func (x *Oauth2AccessCodeSecurity) Reset() { + *x = Oauth2AccessCodeSecurity{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Oauth2AccessCodeSecurity) XXX_Size() int { - return xxx_messageInfo_Oauth2AccessCodeSecurity.Size(m) + +func (x *Oauth2AccessCodeSecurity) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Oauth2AccessCodeSecurity) XXX_DiscardUnknown() { - xxx_messageInfo_Oauth2AccessCodeSecurity.DiscardUnknown(m) + +func (*Oauth2AccessCodeSecurity) ProtoMessage() {} + +func (x *Oauth2AccessCodeSecurity) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[31] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Oauth2AccessCodeSecurity proto.InternalMessageInfo +// Deprecated: Use Oauth2AccessCodeSecurity.ProtoReflect.Descriptor instead. +func (*Oauth2AccessCodeSecurity) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{31} +} -func (m *Oauth2AccessCodeSecurity) GetType() string { - if m != nil { - return m.Type +func (x *Oauth2AccessCodeSecurity) GetType() string { + if x != nil { + return x.Type } return "" } -func (m *Oauth2AccessCodeSecurity) GetFlow() string { - if m != nil { - return m.Flow +func (x *Oauth2AccessCodeSecurity) GetFlow() string { + if x != nil { + return x.Flow } return "" } -func (m *Oauth2AccessCodeSecurity) GetScopes() *Oauth2Scopes { - if m != nil { - return m.Scopes +func (x *Oauth2AccessCodeSecurity) GetScopes() *Oauth2Scopes { + if x != nil { + return x.Scopes } return nil } -func (m *Oauth2AccessCodeSecurity) GetAuthorizationUrl() string { - if m != nil { - return m.AuthorizationUrl +func (x *Oauth2AccessCodeSecurity) GetAuthorizationUrl() string { + if x != nil { + return x.AuthorizationUrl } return "" } -func (m *Oauth2AccessCodeSecurity) GetTokenUrl() string { - if m != nil { - return m.TokenUrl +func (x *Oauth2AccessCodeSecurity) GetTokenUrl() string { + if x != nil { + return x.TokenUrl } return "" } -func (m *Oauth2AccessCodeSecurity) GetDescription() string { - if m != nil { - return m.Description +func (x *Oauth2AccessCodeSecurity) GetDescription() string { + if x != nil { + return x.Description } return "" } -func (m *Oauth2AccessCodeSecurity) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension +func (x *Oauth2AccessCodeSecurity) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension } return nil } type Oauth2ApplicationSecurity struct { - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - Flow string `protobuf:"bytes,2,opt,name=flow,proto3" json:"flow,omitempty"` - Scopes *Oauth2Scopes 
`protobuf:"bytes,3,opt,name=scopes,proto3" json:"scopes,omitempty"` - TokenUrl string `protobuf:"bytes,4,opt,name=token_url,json=tokenUrl,proto3" json:"token_url,omitempty"` - Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Oauth2ApplicationSecurity) Reset() { *m = Oauth2ApplicationSecurity{} } -func (m *Oauth2ApplicationSecurity) String() string { return proto.CompactTextString(m) } -func (*Oauth2ApplicationSecurity) ProtoMessage() {} -func (*Oauth2ApplicationSecurity) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{32} -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *Oauth2ApplicationSecurity) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Oauth2ApplicationSecurity.Unmarshal(m, b) -} -func (m *Oauth2ApplicationSecurity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Oauth2ApplicationSecurity.Marshal(b, m, deterministic) + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Flow string `protobuf:"bytes,2,opt,name=flow,proto3" json:"flow,omitempty"` + Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes,proto3" json:"scopes,omitempty"` + TokenUrl string `protobuf:"bytes,4,opt,name=token_url,json=tokenUrl,proto3" json:"token_url,omitempty"` + Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` } -func (m *Oauth2ApplicationSecurity) XXX_Merge(src proto.Message) { - xxx_messageInfo_Oauth2ApplicationSecurity.Merge(m, src) + +func (x *Oauth2ApplicationSecurity) Reset() { + *x = Oauth2ApplicationSecurity{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Oauth2ApplicationSecurity) XXX_Size() int { - return xxx_messageInfo_Oauth2ApplicationSecurity.Size(m) + +func (x *Oauth2ApplicationSecurity) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Oauth2ApplicationSecurity) XXX_DiscardUnknown() { - xxx_messageInfo_Oauth2ApplicationSecurity.DiscardUnknown(m) + +func (*Oauth2ApplicationSecurity) ProtoMessage() {} + +func (x *Oauth2ApplicationSecurity) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[32] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Oauth2ApplicationSecurity proto.InternalMessageInfo +// Deprecated: Use Oauth2ApplicationSecurity.ProtoReflect.Descriptor instead. 
+func (*Oauth2ApplicationSecurity) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{32} +} -func (m *Oauth2ApplicationSecurity) GetType() string { - if m != nil { - return m.Type +func (x *Oauth2ApplicationSecurity) GetType() string { + if x != nil { + return x.Type } return "" } -func (m *Oauth2ApplicationSecurity) GetFlow() string { - if m != nil { - return m.Flow +func (x *Oauth2ApplicationSecurity) GetFlow() string { + if x != nil { + return x.Flow } return "" } -func (m *Oauth2ApplicationSecurity) GetScopes() *Oauth2Scopes { - if m != nil { - return m.Scopes +func (x *Oauth2ApplicationSecurity) GetScopes() *Oauth2Scopes { + if x != nil { + return x.Scopes } return nil } -func (m *Oauth2ApplicationSecurity) GetTokenUrl() string { - if m != nil { - return m.TokenUrl +func (x *Oauth2ApplicationSecurity) GetTokenUrl() string { + if x != nil { + return x.TokenUrl } return "" } -func (m *Oauth2ApplicationSecurity) GetDescription() string { - if m != nil { - return m.Description +func (x *Oauth2ApplicationSecurity) GetDescription() string { + if x != nil { + return x.Description } return "" } -func (m *Oauth2ApplicationSecurity) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension +func (x *Oauth2ApplicationSecurity) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension } return nil } type Oauth2ImplicitSecurity struct { - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - Flow string `protobuf:"bytes,2,opt,name=flow,proto3" json:"flow,omitempty"` - Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes,proto3" json:"scopes,omitempty"` - AuthorizationUrl string `protobuf:"bytes,4,opt,name=authorization_url,json=authorizationUrl,proto3" json:"authorization_url,omitempty"` - Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Oauth2ImplicitSecurity) Reset() { *m = Oauth2ImplicitSecurity{} } -func (m *Oauth2ImplicitSecurity) String() string { return proto.CompactTextString(m) } -func (*Oauth2ImplicitSecurity) ProtoMessage() {} -func (*Oauth2ImplicitSecurity) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{33} -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *Oauth2ImplicitSecurity) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Oauth2ImplicitSecurity.Unmarshal(m, b) + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Flow string `protobuf:"bytes,2,opt,name=flow,proto3" json:"flow,omitempty"` + Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes,proto3" json:"scopes,omitempty"` + AuthorizationUrl string `protobuf:"bytes,4,opt,name=authorization_url,json=authorizationUrl,proto3" json:"authorization_url,omitempty"` + Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` } -func (m *Oauth2ImplicitSecurity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Oauth2ImplicitSecurity.Marshal(b, m, deterministic) -} -func (m 
*Oauth2ImplicitSecurity) XXX_Merge(src proto.Message) { - xxx_messageInfo_Oauth2ImplicitSecurity.Merge(m, src) + +func (x *Oauth2ImplicitSecurity) Reset() { + *x = Oauth2ImplicitSecurity{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[33] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Oauth2ImplicitSecurity) XXX_Size() int { - return xxx_messageInfo_Oauth2ImplicitSecurity.Size(m) + +func (x *Oauth2ImplicitSecurity) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Oauth2ImplicitSecurity) XXX_DiscardUnknown() { - xxx_messageInfo_Oauth2ImplicitSecurity.DiscardUnknown(m) + +func (*Oauth2ImplicitSecurity) ProtoMessage() {} + +func (x *Oauth2ImplicitSecurity) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[33] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Oauth2ImplicitSecurity proto.InternalMessageInfo +// Deprecated: Use Oauth2ImplicitSecurity.ProtoReflect.Descriptor instead. +func (*Oauth2ImplicitSecurity) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{33} +} -func (m *Oauth2ImplicitSecurity) GetType() string { - if m != nil { - return m.Type +func (x *Oauth2ImplicitSecurity) GetType() string { + if x != nil { + return x.Type } return "" } -func (m *Oauth2ImplicitSecurity) GetFlow() string { - if m != nil { - return m.Flow +func (x *Oauth2ImplicitSecurity) GetFlow() string { + if x != nil { + return x.Flow } return "" } -func (m *Oauth2ImplicitSecurity) GetScopes() *Oauth2Scopes { - if m != nil { - return m.Scopes +func (x *Oauth2ImplicitSecurity) GetScopes() *Oauth2Scopes { + if x != nil { + return x.Scopes } return nil } -func (m *Oauth2ImplicitSecurity) GetAuthorizationUrl() string { - if m != nil { - return m.AuthorizationUrl +func (x *Oauth2ImplicitSecurity) GetAuthorizationUrl() string { + if x != nil { + return x.AuthorizationUrl } return "" } -func (m *Oauth2ImplicitSecurity) GetDescription() string { - if m != nil { - return m.Description +func (x *Oauth2ImplicitSecurity) GetDescription() string { + if x != nil { + return x.Description } return "" } -func (m *Oauth2ImplicitSecurity) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension +func (x *Oauth2ImplicitSecurity) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension } return nil } type Oauth2PasswordSecurity struct { - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - Flow string `protobuf:"bytes,2,opt,name=flow,proto3" json:"flow,omitempty"` - Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes,proto3" json:"scopes,omitempty"` - TokenUrl string `protobuf:"bytes,4,opt,name=token_url,json=tokenUrl,proto3" json:"token_url,omitempty"` - Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Oauth2PasswordSecurity) Reset() { *m = Oauth2PasswordSecurity{} } -func (m *Oauth2PasswordSecurity) String() string { return proto.CompactTextString(m) } -func (*Oauth2PasswordSecurity) 
ProtoMessage() {} -func (*Oauth2PasswordSecurity) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{34} -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *Oauth2PasswordSecurity) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Oauth2PasswordSecurity.Unmarshal(m, b) -} -func (m *Oauth2PasswordSecurity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Oauth2PasswordSecurity.Marshal(b, m, deterministic) + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Flow string `protobuf:"bytes,2,opt,name=flow,proto3" json:"flow,omitempty"` + Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes,proto3" json:"scopes,omitempty"` + TokenUrl string `protobuf:"bytes,4,opt,name=token_url,json=tokenUrl,proto3" json:"token_url,omitempty"` + Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` } -func (m *Oauth2PasswordSecurity) XXX_Merge(src proto.Message) { - xxx_messageInfo_Oauth2PasswordSecurity.Merge(m, src) + +func (x *Oauth2PasswordSecurity) Reset() { + *x = Oauth2PasswordSecurity{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[34] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Oauth2PasswordSecurity) XXX_Size() int { - return xxx_messageInfo_Oauth2PasswordSecurity.Size(m) + +func (x *Oauth2PasswordSecurity) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Oauth2PasswordSecurity) XXX_DiscardUnknown() { - xxx_messageInfo_Oauth2PasswordSecurity.DiscardUnknown(m) + +func (*Oauth2PasswordSecurity) ProtoMessage() {} + +func (x *Oauth2PasswordSecurity) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[34] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Oauth2PasswordSecurity proto.InternalMessageInfo +// Deprecated: Use Oauth2PasswordSecurity.ProtoReflect.Descriptor instead. 
+func (*Oauth2PasswordSecurity) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{34} +} -func (m *Oauth2PasswordSecurity) GetType() string { - if m != nil { - return m.Type +func (x *Oauth2PasswordSecurity) GetType() string { + if x != nil { + return x.Type } return "" } -func (m *Oauth2PasswordSecurity) GetFlow() string { - if m != nil { - return m.Flow +func (x *Oauth2PasswordSecurity) GetFlow() string { + if x != nil { + return x.Flow } return "" } -func (m *Oauth2PasswordSecurity) GetScopes() *Oauth2Scopes { - if m != nil { - return m.Scopes +func (x *Oauth2PasswordSecurity) GetScopes() *Oauth2Scopes { + if x != nil { + return x.Scopes } return nil } -func (m *Oauth2PasswordSecurity) GetTokenUrl() string { - if m != nil { - return m.TokenUrl +func (x *Oauth2PasswordSecurity) GetTokenUrl() string { + if x != nil { + return x.TokenUrl } return "" } -func (m *Oauth2PasswordSecurity) GetDescription() string { - if m != nil { - return m.Description +func (x *Oauth2PasswordSecurity) GetDescription() string { + if x != nil { + return x.Description } return "" } -func (m *Oauth2PasswordSecurity) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension +func (x *Oauth2PasswordSecurity) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension } return nil } type Oauth2Scopes struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + AdditionalProperties []*NamedString `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` } -func (m *Oauth2Scopes) Reset() { *m = Oauth2Scopes{} } -func (m *Oauth2Scopes) String() string { return proto.CompactTextString(m) } -func (*Oauth2Scopes) ProtoMessage() {} -func (*Oauth2Scopes) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{35} +func (x *Oauth2Scopes) Reset() { + *x = Oauth2Scopes{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[35] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Oauth2Scopes) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Oauth2Scopes.Unmarshal(m, b) -} -func (m *Oauth2Scopes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Oauth2Scopes.Marshal(b, m, deterministic) -} -func (m *Oauth2Scopes) XXX_Merge(src proto.Message) { - xxx_messageInfo_Oauth2Scopes.Merge(m, src) -} -func (m *Oauth2Scopes) XXX_Size() int { - return xxx_messageInfo_Oauth2Scopes.Size(m) +func (x *Oauth2Scopes) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Oauth2Scopes) XXX_DiscardUnknown() { - xxx_messageInfo_Oauth2Scopes.DiscardUnknown(m) + +func (*Oauth2Scopes) ProtoMessage() {} + +func (x *Oauth2Scopes) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[35] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Oauth2Scopes proto.InternalMessageInfo +// Deprecated: Use Oauth2Scopes.ProtoReflect.Descriptor instead. 
+func (*Oauth2Scopes) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{35} +} -func (m *Oauth2Scopes) GetAdditionalProperties() []*NamedString { - if m != nil { - return m.AdditionalProperties +func (x *Oauth2Scopes) GetAdditionalProperties() []*NamedString { + if x != nil { + return x.AdditionalProperties } return nil } type Operation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + Tags []string `protobuf:"bytes,1,rep,name=tags,proto3" json:"tags,omitempty"` // A brief summary of the operation. Summary string `protobuf:"bytes,2,opt,name=summary,proto3" json:"summary,omitempty"` @@ -2753,182 +3048,178 @@ type Operation struct { Parameters []*ParametersItem `protobuf:"bytes,8,rep,name=parameters,proto3" json:"parameters,omitempty"` Responses *Responses `protobuf:"bytes,9,opt,name=responses,proto3" json:"responses,omitempty"` // The transfer protocol of the API. - Schemes []string `protobuf:"bytes,10,rep,name=schemes,proto3" json:"schemes,omitempty"` - Deprecated bool `protobuf:"varint,11,opt,name=deprecated,proto3" json:"deprecated,omitempty"` - Security []*SecurityRequirement `protobuf:"bytes,12,rep,name=security,proto3" json:"security,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,13,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Operation) Reset() { *m = Operation{} } -func (m *Operation) String() string { return proto.CompactTextString(m) } -func (*Operation) ProtoMessage() {} -func (*Operation) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{36} + Schemes []string `protobuf:"bytes,10,rep,name=schemes,proto3" json:"schemes,omitempty"` + Deprecated bool `protobuf:"varint,11,opt,name=deprecated,proto3" json:"deprecated,omitempty"` + Security []*SecurityRequirement `protobuf:"bytes,12,rep,name=security,proto3" json:"security,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,13,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` } -func (m *Operation) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Operation.Unmarshal(m, b) -} -func (m *Operation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Operation.Marshal(b, m, deterministic) -} -func (m *Operation) XXX_Merge(src proto.Message) { - xxx_messageInfo_Operation.Merge(m, src) +func (x *Operation) Reset() { + *x = Operation{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[36] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Operation) XXX_Size() int { - return xxx_messageInfo_Operation.Size(m) + +func (x *Operation) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Operation) XXX_DiscardUnknown() { - xxx_messageInfo_Operation.DiscardUnknown(m) + +func (*Operation) ProtoMessage() {} + +func (x *Operation) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[36] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Operation proto.InternalMessageInfo +// Deprecated: Use Operation.ProtoReflect.Descriptor instead. 
+func (*Operation) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{36} +} -func (m *Operation) GetTags() []string { - if m != nil { - return m.Tags +func (x *Operation) GetTags() []string { + if x != nil { + return x.Tags } return nil } -func (m *Operation) GetSummary() string { - if m != nil { - return m.Summary +func (x *Operation) GetSummary() string { + if x != nil { + return x.Summary } return "" } -func (m *Operation) GetDescription() string { - if m != nil { - return m.Description +func (x *Operation) GetDescription() string { + if x != nil { + return x.Description } return "" } -func (m *Operation) GetExternalDocs() *ExternalDocs { - if m != nil { - return m.ExternalDocs +func (x *Operation) GetExternalDocs() *ExternalDocs { + if x != nil { + return x.ExternalDocs } return nil } -func (m *Operation) GetOperationId() string { - if m != nil { - return m.OperationId +func (x *Operation) GetOperationId() string { + if x != nil { + return x.OperationId } return "" } -func (m *Operation) GetProduces() []string { - if m != nil { - return m.Produces +func (x *Operation) GetProduces() []string { + if x != nil { + return x.Produces } return nil } -func (m *Operation) GetConsumes() []string { - if m != nil { - return m.Consumes +func (x *Operation) GetConsumes() []string { + if x != nil { + return x.Consumes } return nil } -func (m *Operation) GetParameters() []*ParametersItem { - if m != nil { - return m.Parameters +func (x *Operation) GetParameters() []*ParametersItem { + if x != nil { + return x.Parameters } return nil } -func (m *Operation) GetResponses() *Responses { - if m != nil { - return m.Responses +func (x *Operation) GetResponses() *Responses { + if x != nil { + return x.Responses } return nil } -func (m *Operation) GetSchemes() []string { - if m != nil { - return m.Schemes +func (x *Operation) GetSchemes() []string { + if x != nil { + return x.Schemes } return nil } -func (m *Operation) GetDeprecated() bool { - if m != nil { - return m.Deprecated +func (x *Operation) GetDeprecated() bool { + if x != nil { + return x.Deprecated } return false } -func (m *Operation) GetSecurity() []*SecurityRequirement { - if m != nil { - return m.Security +func (x *Operation) GetSecurity() []*SecurityRequirement { + if x != nil { + return x.Security } return nil } -func (m *Operation) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension +func (x *Operation) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension } return nil } type Parameter struct { - // Types that are valid to be assigned to Oneof: + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Oneof: // *Parameter_BodyParameter // *Parameter_NonBodyParameter - Oneof isParameter_Oneof `protobuf_oneof:"oneof"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Oneof isParameter_Oneof `protobuf_oneof:"oneof"` } -func (m *Parameter) Reset() { *m = Parameter{} } -func (m *Parameter) String() string { return proto.CompactTextString(m) } -func (*Parameter) ProtoMessage() {} -func (*Parameter) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{37} +func (x *Parameter) Reset() { + *x = Parameter{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[37] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m 
*Parameter) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Parameter.Unmarshal(m, b) -} -func (m *Parameter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Parameter.Marshal(b, m, deterministic) -} -func (m *Parameter) XXX_Merge(src proto.Message) { - xxx_messageInfo_Parameter.Merge(m, src) -} -func (m *Parameter) XXX_Size() int { - return xxx_messageInfo_Parameter.Size(m) +func (x *Parameter) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Parameter) XXX_DiscardUnknown() { - xxx_messageInfo_Parameter.DiscardUnknown(m) -} - -var xxx_messageInfo_Parameter proto.InternalMessageInfo -type isParameter_Oneof interface { - isParameter_Oneof() -} +func (*Parameter) ProtoMessage() {} -type Parameter_BodyParameter struct { - BodyParameter *BodyParameter `protobuf:"bytes,1,opt,name=body_parameter,json=bodyParameter,proto3,oneof"` +func (x *Parameter) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[37] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -type Parameter_NonBodyParameter struct { - NonBodyParameter *NonBodyParameter `protobuf:"bytes,2,opt,name=non_body_parameter,json=nonBodyParameter,proto3,oneof"` +// Deprecated: Use Parameter.ProtoReflect.Descriptor instead. +func (*Parameter) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{37} } -func (*Parameter_BodyParameter) isParameter_Oneof() {} - -func (*Parameter_NonBodyParameter) isParameter_Oneof() {} - func (m *Parameter) GetOneof() isParameter_Oneof { if m != nil { return m.Oneof @@ -2936,119 +3227,127 @@ func (m *Parameter) GetOneof() isParameter_Oneof { return nil } -func (m *Parameter) GetBodyParameter() *BodyParameter { - if x, ok := m.GetOneof().(*Parameter_BodyParameter); ok { +func (x *Parameter) GetBodyParameter() *BodyParameter { + if x, ok := x.GetOneof().(*Parameter_BodyParameter); ok { return x.BodyParameter } return nil } -func (m *Parameter) GetNonBodyParameter() *NonBodyParameter { - if x, ok := m.GetOneof().(*Parameter_NonBodyParameter); ok { +func (x *Parameter) GetNonBodyParameter() *NonBodyParameter { + if x, ok := x.GetOneof().(*Parameter_NonBodyParameter); ok { return x.NonBodyParameter } return nil } -// XXX_OneofWrappers is for the internal use of the proto package. 
-func (*Parameter) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*Parameter_BodyParameter)(nil), - (*Parameter_NonBodyParameter)(nil), - } +type isParameter_Oneof interface { + isParameter_Oneof() +} + +type Parameter_BodyParameter struct { + BodyParameter *BodyParameter `protobuf:"bytes,1,opt,name=body_parameter,json=bodyParameter,proto3,oneof"` } +type Parameter_NonBodyParameter struct { + NonBodyParameter *NonBodyParameter `protobuf:"bytes,2,opt,name=non_body_parameter,json=nonBodyParameter,proto3,oneof"` +} + +func (*Parameter_BodyParameter) isParameter_Oneof() {} + +func (*Parameter_NonBodyParameter) isParameter_Oneof() {} + // One or more JSON representations for parameters type ParameterDefinitions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + AdditionalProperties []*NamedParameter `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` } -func (m *ParameterDefinitions) Reset() { *m = ParameterDefinitions{} } -func (m *ParameterDefinitions) String() string { return proto.CompactTextString(m) } -func (*ParameterDefinitions) ProtoMessage() {} -func (*ParameterDefinitions) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{38} +func (x *ParameterDefinitions) Reset() { + *x = ParameterDefinitions{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[38] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *ParameterDefinitions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ParameterDefinitions.Unmarshal(m, b) -} -func (m *ParameterDefinitions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ParameterDefinitions.Marshal(b, m, deterministic) +func (x *ParameterDefinitions) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ParameterDefinitions) XXX_Merge(src proto.Message) { - xxx_messageInfo_ParameterDefinitions.Merge(m, src) -} -func (m *ParameterDefinitions) XXX_Size() int { - return xxx_messageInfo_ParameterDefinitions.Size(m) -} -func (m *ParameterDefinitions) XXX_DiscardUnknown() { - xxx_messageInfo_ParameterDefinitions.DiscardUnknown(m) + +func (*ParameterDefinitions) ProtoMessage() {} + +func (x *ParameterDefinitions) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[38] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_ParameterDefinitions proto.InternalMessageInfo +// Deprecated: Use ParameterDefinitions.ProtoReflect.Descriptor instead. 
+func (*ParameterDefinitions) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{38} +} -func (m *ParameterDefinitions) GetAdditionalProperties() []*NamedParameter { - if m != nil { - return m.AdditionalProperties +func (x *ParameterDefinitions) GetAdditionalProperties() []*NamedParameter { + if x != nil { + return x.AdditionalProperties } return nil } type ParametersItem struct { - // Types that are valid to be assigned to Oneof: + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Oneof: // *ParametersItem_Parameter // *ParametersItem_JsonReference - Oneof isParametersItem_Oneof `protobuf_oneof:"oneof"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Oneof isParametersItem_Oneof `protobuf_oneof:"oneof"` } -func (m *ParametersItem) Reset() { *m = ParametersItem{} } -func (m *ParametersItem) String() string { return proto.CompactTextString(m) } -func (*ParametersItem) ProtoMessage() {} -func (*ParametersItem) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{39} +func (x *ParametersItem) Reset() { + *x = ParametersItem{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[39] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *ParametersItem) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ParametersItem.Unmarshal(m, b) -} -func (m *ParametersItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ParametersItem.Marshal(b, m, deterministic) +func (x *ParametersItem) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ParametersItem) XXX_Merge(src proto.Message) { - xxx_messageInfo_ParametersItem.Merge(m, src) -} -func (m *ParametersItem) XXX_Size() int { - return xxx_messageInfo_ParametersItem.Size(m) -} -func (m *ParametersItem) XXX_DiscardUnknown() { - xxx_messageInfo_ParametersItem.DiscardUnknown(m) -} - -var xxx_messageInfo_ParametersItem proto.InternalMessageInfo -type isParametersItem_Oneof interface { - isParametersItem_Oneof() -} +func (*ParametersItem) ProtoMessage() {} -type ParametersItem_Parameter struct { - Parameter *Parameter `protobuf:"bytes,1,opt,name=parameter,proto3,oneof"` +func (x *ParametersItem) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[39] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -type ParametersItem_JsonReference struct { - JsonReference *JsonReference `protobuf:"bytes,2,opt,name=json_reference,json=jsonReference,proto3,oneof"` +// Deprecated: Use ParametersItem.ProtoReflect.Descriptor instead. 
+func (*ParametersItem) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{39} } -func (*ParametersItem_Parameter) isParametersItem_Oneof() {} - -func (*ParametersItem_JsonReference) isParametersItem_Oneof() {} - func (m *ParametersItem) GetOneof() isParametersItem_Oneof { if m != nil { return m.Oneof @@ -3056,29 +3355,41 @@ func (m *ParametersItem) GetOneof() isParametersItem_Oneof { return nil } -func (m *ParametersItem) GetParameter() *Parameter { - if x, ok := m.GetOneof().(*ParametersItem_Parameter); ok { +func (x *ParametersItem) GetParameter() *Parameter { + if x, ok := x.GetOneof().(*ParametersItem_Parameter); ok { return x.Parameter } return nil } -func (m *ParametersItem) GetJsonReference() *JsonReference { - if x, ok := m.GetOneof().(*ParametersItem_JsonReference); ok { +func (x *ParametersItem) GetJsonReference() *JsonReference { + if x, ok := x.GetOneof().(*ParametersItem_JsonReference); ok { return x.JsonReference } return nil } -// XXX_OneofWrappers is for the internal use of the proto package. -func (*ParametersItem) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*ParametersItem_Parameter)(nil), - (*ParametersItem_JsonReference)(nil), - } +type isParametersItem_Oneof interface { + isParametersItem_Oneof() +} + +type ParametersItem_Parameter struct { + Parameter *Parameter `protobuf:"bytes,1,opt,name=parameter,proto3,oneof"` +} + +type ParametersItem_JsonReference struct { + JsonReference *JsonReference `protobuf:"bytes,2,opt,name=json_reference,json=jsonReference,proto3,oneof"` } +func (*ParametersItem_Parameter) isParametersItem_Oneof() {} + +func (*ParametersItem_JsonReference) isParametersItem_Oneof() {} + type PathItem struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref,proto3" json:"_ref,omitempty"` Get *Operation `protobuf:"bytes,2,opt,name=get,proto3" json:"get,omitempty"` Put *Operation `protobuf:"bytes,3,opt,name=put,proto3" json:"put,omitempty"` @@ -3088,109 +3399,117 @@ type PathItem struct { Head *Operation `protobuf:"bytes,7,opt,name=head,proto3" json:"head,omitempty"` Patch *Operation `protobuf:"bytes,8,opt,name=patch,proto3" json:"patch,omitempty"` // The parameters needed to send a valid API call. 
- Parameters []*ParametersItem `protobuf:"bytes,9,rep,name=parameters,proto3" json:"parameters,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,10,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Parameters []*ParametersItem `protobuf:"bytes,9,rep,name=parameters,proto3" json:"parameters,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,10,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` } -func (m *PathItem) Reset() { *m = PathItem{} } -func (m *PathItem) String() string { return proto.CompactTextString(m) } -func (*PathItem) ProtoMessage() {} -func (*PathItem) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{40} +func (x *PathItem) Reset() { + *x = PathItem{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[40] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *PathItem) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PathItem.Unmarshal(m, b) -} -func (m *PathItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PathItem.Marshal(b, m, deterministic) -} -func (m *PathItem) XXX_Merge(src proto.Message) { - xxx_messageInfo_PathItem.Merge(m, src) -} -func (m *PathItem) XXX_Size() int { - return xxx_messageInfo_PathItem.Size(m) +func (x *PathItem) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *PathItem) XXX_DiscardUnknown() { - xxx_messageInfo_PathItem.DiscardUnknown(m) + +func (*PathItem) ProtoMessage() {} + +func (x *PathItem) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[40] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_PathItem proto.InternalMessageInfo +// Deprecated: Use PathItem.ProtoReflect.Descriptor instead. 
+func (*PathItem) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{40} +} -func (m *PathItem) GetXRef() string { - if m != nil { - return m.XRef +func (x *PathItem) GetXRef() string { + if x != nil { + return x.XRef } return "" } -func (m *PathItem) GetGet() *Operation { - if m != nil { - return m.Get +func (x *PathItem) GetGet() *Operation { + if x != nil { + return x.Get } return nil } -func (m *PathItem) GetPut() *Operation { - if m != nil { - return m.Put +func (x *PathItem) GetPut() *Operation { + if x != nil { + return x.Put } return nil } -func (m *PathItem) GetPost() *Operation { - if m != nil { - return m.Post +func (x *PathItem) GetPost() *Operation { + if x != nil { + return x.Post } return nil } -func (m *PathItem) GetDelete() *Operation { - if m != nil { - return m.Delete +func (x *PathItem) GetDelete() *Operation { + if x != nil { + return x.Delete } return nil } -func (m *PathItem) GetOptions() *Operation { - if m != nil { - return m.Options +func (x *PathItem) GetOptions() *Operation { + if x != nil { + return x.Options } return nil } -func (m *PathItem) GetHead() *Operation { - if m != nil { - return m.Head +func (x *PathItem) GetHead() *Operation { + if x != nil { + return x.Head } return nil } -func (m *PathItem) GetPatch() *Operation { - if m != nil { - return m.Patch +func (x *PathItem) GetPatch() *Operation { + if x != nil { + return x.Patch } return nil } -func (m *PathItem) GetParameters() []*ParametersItem { - if m != nil { - return m.Parameters +func (x *PathItem) GetParameters() []*ParametersItem { + if x != nil { + return x.Parameters } return nil } -func (m *PathItem) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension +func (x *PathItem) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension } return nil } type PathParameterSubSchema struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Determines whether or not this parameter is required or optional. Required bool `protobuf:"varint,1,opt,name=required,proto3" json:"required,omitempty"` // Determines the location of the parameter. @@ -3198,472 +3517,504 @@ type PathParameterSubSchema struct { // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` // The name of the parameter. 
- Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` - Type string `protobuf:"bytes,5,opt,name=type,proto3" json:"type,omitempty"` - Format string `protobuf:"bytes,6,opt,name=format,proto3" json:"format,omitempty"` - Items *PrimitivesItems `protobuf:"bytes,7,opt,name=items,proto3" json:"items,omitempty"` - CollectionFormat string `protobuf:"bytes,8,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"` - Default *Any `protobuf:"bytes,9,opt,name=default,proto3" json:"default,omitempty"` - Maximum float64 `protobuf:"fixed64,10,opt,name=maximum,proto3" json:"maximum,omitempty"` - ExclusiveMaximum bool `protobuf:"varint,11,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` - Minimum float64 `protobuf:"fixed64,12,opt,name=minimum,proto3" json:"minimum,omitempty"` - ExclusiveMinimum bool `protobuf:"varint,13,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` - MaxLength int64 `protobuf:"varint,14,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` - MinLength int64 `protobuf:"varint,15,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` - Pattern string `protobuf:"bytes,16,opt,name=pattern,proto3" json:"pattern,omitempty"` - MaxItems int64 `protobuf:"varint,17,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` - MinItems int64 `protobuf:"varint,18,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` - UniqueItems bool `protobuf:"varint,19,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` - Enum []*Any `protobuf:"bytes,20,rep,name=enum,proto3" json:"enum,omitempty"` - MultipleOf float64 `protobuf:"fixed64,21,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,22,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PathParameterSubSchema) Reset() { *m = PathParameterSubSchema{} } -func (m *PathParameterSubSchema) String() string { return proto.CompactTextString(m) } -func (*PathParameterSubSchema) ProtoMessage() {} + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + Type string `protobuf:"bytes,5,opt,name=type,proto3" json:"type,omitempty"` + Format string `protobuf:"bytes,6,opt,name=format,proto3" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,7,opt,name=items,proto3" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,8,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,9,opt,name=default,proto3" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,10,opt,name=maximum,proto3" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,11,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,12,opt,name=minimum,proto3" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,13,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,14,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,15,opt,name=min_length,json=minLength,proto3" 
json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,16,opt,name=pattern,proto3" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,17,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,18,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,19,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,20,rep,name=enum,proto3" json:"enum,omitempty"` + MultipleOf float64 `protobuf:"fixed64,21,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,22,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` +} + +func (x *PathParameterSubSchema) Reset() { + *x = PathParameterSubSchema{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[41] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PathParameterSubSchema) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PathParameterSubSchema) ProtoMessage() {} + +func (x *PathParameterSubSchema) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[41] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PathParameterSubSchema.ProtoReflect.Descriptor instead. func (*PathParameterSubSchema) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{41} + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{41} } -func (m *PathParameterSubSchema) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PathParameterSubSchema.Unmarshal(m, b) -} -func (m *PathParameterSubSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PathParameterSubSchema.Marshal(b, m, deterministic) -} -func (m *PathParameterSubSchema) XXX_Merge(src proto.Message) { - xxx_messageInfo_PathParameterSubSchema.Merge(m, src) -} -func (m *PathParameterSubSchema) XXX_Size() int { - return xxx_messageInfo_PathParameterSubSchema.Size(m) -} -func (m *PathParameterSubSchema) XXX_DiscardUnknown() { - xxx_messageInfo_PathParameterSubSchema.DiscardUnknown(m) -} - -var xxx_messageInfo_PathParameterSubSchema proto.InternalMessageInfo - -func (m *PathParameterSubSchema) GetRequired() bool { - if m != nil { - return m.Required +func (x *PathParameterSubSchema) GetRequired() bool { + if x != nil { + return x.Required } return false } -func (m *PathParameterSubSchema) GetIn() string { - if m != nil { - return m.In +func (x *PathParameterSubSchema) GetIn() string { + if x != nil { + return x.In } return "" } -func (m *PathParameterSubSchema) GetDescription() string { - if m != nil { - return m.Description +func (x *PathParameterSubSchema) GetDescription() string { + if x != nil { + return x.Description } return "" } -func (m *PathParameterSubSchema) GetName() string { - if m != nil { - return m.Name +func (x *PathParameterSubSchema) GetName() string { + if x != nil { + return x.Name } return "" } -func (m *PathParameterSubSchema) GetType() string { - if m != nil { - return m.Type +func (x *PathParameterSubSchema) GetType() string { + if x != nil { + return x.Type } return "" } -func (m *PathParameterSubSchema) GetFormat() string { - if m != nil { - return 
m.Format +func (x *PathParameterSubSchema) GetFormat() string { + if x != nil { + return x.Format } return "" } -func (m *PathParameterSubSchema) GetItems() *PrimitivesItems { - if m != nil { - return m.Items +func (x *PathParameterSubSchema) GetItems() *PrimitivesItems { + if x != nil { + return x.Items } return nil } -func (m *PathParameterSubSchema) GetCollectionFormat() string { - if m != nil { - return m.CollectionFormat +func (x *PathParameterSubSchema) GetCollectionFormat() string { + if x != nil { + return x.CollectionFormat } return "" } -func (m *PathParameterSubSchema) GetDefault() *Any { - if m != nil { - return m.Default +func (x *PathParameterSubSchema) GetDefault() *Any { + if x != nil { + return x.Default } return nil } -func (m *PathParameterSubSchema) GetMaximum() float64 { - if m != nil { - return m.Maximum +func (x *PathParameterSubSchema) GetMaximum() float64 { + if x != nil { + return x.Maximum } return 0 } -func (m *PathParameterSubSchema) GetExclusiveMaximum() bool { - if m != nil { - return m.ExclusiveMaximum +func (x *PathParameterSubSchema) GetExclusiveMaximum() bool { + if x != nil { + return x.ExclusiveMaximum } return false } -func (m *PathParameterSubSchema) GetMinimum() float64 { - if m != nil { - return m.Minimum +func (x *PathParameterSubSchema) GetMinimum() float64 { + if x != nil { + return x.Minimum } return 0 } -func (m *PathParameterSubSchema) GetExclusiveMinimum() bool { - if m != nil { - return m.ExclusiveMinimum +func (x *PathParameterSubSchema) GetExclusiveMinimum() bool { + if x != nil { + return x.ExclusiveMinimum } return false } -func (m *PathParameterSubSchema) GetMaxLength() int64 { - if m != nil { - return m.MaxLength +func (x *PathParameterSubSchema) GetMaxLength() int64 { + if x != nil { + return x.MaxLength } return 0 } -func (m *PathParameterSubSchema) GetMinLength() int64 { - if m != nil { - return m.MinLength +func (x *PathParameterSubSchema) GetMinLength() int64 { + if x != nil { + return x.MinLength } return 0 } -func (m *PathParameterSubSchema) GetPattern() string { - if m != nil { - return m.Pattern +func (x *PathParameterSubSchema) GetPattern() string { + if x != nil { + return x.Pattern } return "" } -func (m *PathParameterSubSchema) GetMaxItems() int64 { - if m != nil { - return m.MaxItems +func (x *PathParameterSubSchema) GetMaxItems() int64 { + if x != nil { + return x.MaxItems } return 0 } -func (m *PathParameterSubSchema) GetMinItems() int64 { - if m != nil { - return m.MinItems +func (x *PathParameterSubSchema) GetMinItems() int64 { + if x != nil { + return x.MinItems } return 0 } -func (m *PathParameterSubSchema) GetUniqueItems() bool { - if m != nil { - return m.UniqueItems +func (x *PathParameterSubSchema) GetUniqueItems() bool { + if x != nil { + return x.UniqueItems } return false } -func (m *PathParameterSubSchema) GetEnum() []*Any { - if m != nil { - return m.Enum +func (x *PathParameterSubSchema) GetEnum() []*Any { + if x != nil { + return x.Enum } return nil } -func (m *PathParameterSubSchema) GetMultipleOf() float64 { - if m != nil { - return m.MultipleOf +func (x *PathParameterSubSchema) GetMultipleOf() float64 { + if x != nil { + return x.MultipleOf } return 0 } -func (m *PathParameterSubSchema) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension +func (x *PathParameterSubSchema) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension } return nil } // Relative paths to the individual endpoints. They must be relative to the 'basePath'. 
type Paths struct { - VendorExtension []*NamedAny `protobuf:"bytes,1,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - Path []*NamedPathItem `protobuf:"bytes,2,rep,name=path,proto3" json:"path,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *Paths) Reset() { *m = Paths{} } -func (m *Paths) String() string { return proto.CompactTextString(m) } -func (*Paths) ProtoMessage() {} -func (*Paths) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{42} + VendorExtension []*NamedAny `protobuf:"bytes,1,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` + Path []*NamedPathItem `protobuf:"bytes,2,rep,name=path,proto3" json:"path,omitempty"` } -func (m *Paths) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Paths.Unmarshal(m, b) -} -func (m *Paths) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Paths.Marshal(b, m, deterministic) -} -func (m *Paths) XXX_Merge(src proto.Message) { - xxx_messageInfo_Paths.Merge(m, src) +func (x *Paths) Reset() { + *x = Paths{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[42] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Paths) XXX_Size() int { - return xxx_messageInfo_Paths.Size(m) + +func (x *Paths) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Paths) XXX_DiscardUnknown() { - xxx_messageInfo_Paths.DiscardUnknown(m) + +func (*Paths) ProtoMessage() {} + +func (x *Paths) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[42] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Paths proto.InternalMessageInfo +// Deprecated: Use Paths.ProtoReflect.Descriptor instead. 
+func (*Paths) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{42} +} -func (m *Paths) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension +func (x *Paths) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension } return nil } -func (m *Paths) GetPath() []*NamedPathItem { - if m != nil { - return m.Path +func (x *Paths) GetPath() []*NamedPathItem { + if x != nil { + return x.Path } return nil } type PrimitivesItems struct { - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - Format string `protobuf:"bytes,2,opt,name=format,proto3" json:"format,omitempty"` - Items *PrimitivesItems `protobuf:"bytes,3,opt,name=items,proto3" json:"items,omitempty"` - CollectionFormat string `protobuf:"bytes,4,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"` - Default *Any `protobuf:"bytes,5,opt,name=default,proto3" json:"default,omitempty"` - Maximum float64 `protobuf:"fixed64,6,opt,name=maximum,proto3" json:"maximum,omitempty"` - ExclusiveMaximum bool `protobuf:"varint,7,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` - Minimum float64 `protobuf:"fixed64,8,opt,name=minimum,proto3" json:"minimum,omitempty"` - ExclusiveMinimum bool `protobuf:"varint,9,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` - MaxLength int64 `protobuf:"varint,10,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` - MinLength int64 `protobuf:"varint,11,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` - Pattern string `protobuf:"bytes,12,opt,name=pattern,proto3" json:"pattern,omitempty"` - MaxItems int64 `protobuf:"varint,13,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` - MinItems int64 `protobuf:"varint,14,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` - UniqueItems bool `protobuf:"varint,15,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` - Enum []*Any `protobuf:"bytes,16,rep,name=enum,proto3" json:"enum,omitempty"` - MultipleOf float64 `protobuf:"fixed64,17,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,18,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PrimitivesItems) Reset() { *m = PrimitivesItems{} } -func (m *PrimitivesItems) String() string { return proto.CompactTextString(m) } -func (*PrimitivesItems) ProtoMessage() {} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Format string `protobuf:"bytes,2,opt,name=format,proto3" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,3,opt,name=items,proto3" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,4,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,5,opt,name=default,proto3" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,6,opt,name=maximum,proto3" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,7,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` + Minimum 
float64 `protobuf:"fixed64,8,opt,name=minimum,proto3" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,9,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,10,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,11,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,12,opt,name=pattern,proto3" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,13,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,14,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,15,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,16,rep,name=enum,proto3" json:"enum,omitempty"` + MultipleOf float64 `protobuf:"fixed64,17,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,18,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` +} + +func (x *PrimitivesItems) Reset() { + *x = PrimitivesItems{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[43] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PrimitivesItems) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PrimitivesItems) ProtoMessage() {} + +func (x *PrimitivesItems) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[43] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PrimitivesItems.ProtoReflect.Descriptor instead. 
func (*PrimitivesItems) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{43} -} - -func (m *PrimitivesItems) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PrimitivesItems.Unmarshal(m, b) -} -func (m *PrimitivesItems) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PrimitivesItems.Marshal(b, m, deterministic) -} -func (m *PrimitivesItems) XXX_Merge(src proto.Message) { - xxx_messageInfo_PrimitivesItems.Merge(m, src) -} -func (m *PrimitivesItems) XXX_Size() int { - return xxx_messageInfo_PrimitivesItems.Size(m) -} -func (m *PrimitivesItems) XXX_DiscardUnknown() { - xxx_messageInfo_PrimitivesItems.DiscardUnknown(m) + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{43} } -var xxx_messageInfo_PrimitivesItems proto.InternalMessageInfo - -func (m *PrimitivesItems) GetType() string { - if m != nil { - return m.Type +func (x *PrimitivesItems) GetType() string { + if x != nil { + return x.Type } return "" } -func (m *PrimitivesItems) GetFormat() string { - if m != nil { - return m.Format +func (x *PrimitivesItems) GetFormat() string { + if x != nil { + return x.Format } return "" } -func (m *PrimitivesItems) GetItems() *PrimitivesItems { - if m != nil { - return m.Items +func (x *PrimitivesItems) GetItems() *PrimitivesItems { + if x != nil { + return x.Items } return nil } -func (m *PrimitivesItems) GetCollectionFormat() string { - if m != nil { - return m.CollectionFormat +func (x *PrimitivesItems) GetCollectionFormat() string { + if x != nil { + return x.CollectionFormat } return "" } -func (m *PrimitivesItems) GetDefault() *Any { - if m != nil { - return m.Default +func (x *PrimitivesItems) GetDefault() *Any { + if x != nil { + return x.Default } return nil } -func (m *PrimitivesItems) GetMaximum() float64 { - if m != nil { - return m.Maximum +func (x *PrimitivesItems) GetMaximum() float64 { + if x != nil { + return x.Maximum } return 0 } -func (m *PrimitivesItems) GetExclusiveMaximum() bool { - if m != nil { - return m.ExclusiveMaximum +func (x *PrimitivesItems) GetExclusiveMaximum() bool { + if x != nil { + return x.ExclusiveMaximum } return false } -func (m *PrimitivesItems) GetMinimum() float64 { - if m != nil { - return m.Minimum +func (x *PrimitivesItems) GetMinimum() float64 { + if x != nil { + return x.Minimum } return 0 } -func (m *PrimitivesItems) GetExclusiveMinimum() bool { - if m != nil { - return m.ExclusiveMinimum +func (x *PrimitivesItems) GetExclusiveMinimum() bool { + if x != nil { + return x.ExclusiveMinimum } return false } -func (m *PrimitivesItems) GetMaxLength() int64 { - if m != nil { - return m.MaxLength +func (x *PrimitivesItems) GetMaxLength() int64 { + if x != nil { + return x.MaxLength } return 0 } -func (m *PrimitivesItems) GetMinLength() int64 { - if m != nil { - return m.MinLength +func (x *PrimitivesItems) GetMinLength() int64 { + if x != nil { + return x.MinLength } return 0 } -func (m *PrimitivesItems) GetPattern() string { - if m != nil { - return m.Pattern +func (x *PrimitivesItems) GetPattern() string { + if x != nil { + return x.Pattern } return "" } -func (m *PrimitivesItems) GetMaxItems() int64 { - if m != nil { - return m.MaxItems +func (x *PrimitivesItems) GetMaxItems() int64 { + if x != nil { + return x.MaxItems } return 0 } -func (m *PrimitivesItems) GetMinItems() int64 { - if m != nil { - return m.MinItems +func (x *PrimitivesItems) GetMinItems() int64 { + if x != nil { + return x.MinItems } return 0 } -func (m *PrimitivesItems) GetUniqueItems() bool { - if m != 
nil { - return m.UniqueItems +func (x *PrimitivesItems) GetUniqueItems() bool { + if x != nil { + return x.UniqueItems } return false } -func (m *PrimitivesItems) GetEnum() []*Any { - if m != nil { - return m.Enum +func (x *PrimitivesItems) GetEnum() []*Any { + if x != nil { + return x.Enum } return nil } -func (m *PrimitivesItems) GetMultipleOf() float64 { - if m != nil { - return m.MultipleOf +func (x *PrimitivesItems) GetMultipleOf() float64 { + if x != nil { + return x.MultipleOf } return 0 } -func (m *PrimitivesItems) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension +func (x *PrimitivesItems) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension } return nil } type Properties struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + AdditionalProperties []*NamedSchema `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` } -func (m *Properties) Reset() { *m = Properties{} } -func (m *Properties) String() string { return proto.CompactTextString(m) } -func (*Properties) ProtoMessage() {} -func (*Properties) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{44} +func (x *Properties) Reset() { + *x = Properties{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[44] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Properties) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Properties.Unmarshal(m, b) -} -func (m *Properties) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Properties.Marshal(b, m, deterministic) -} -func (m *Properties) XXX_Merge(src proto.Message) { - xxx_messageInfo_Properties.Merge(m, src) -} -func (m *Properties) XXX_Size() int { - return xxx_messageInfo_Properties.Size(m) +func (x *Properties) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Properties) XXX_DiscardUnknown() { - xxx_messageInfo_Properties.DiscardUnknown(m) + +func (*Properties) ProtoMessage() {} + +func (x *Properties) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[44] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Properties proto.InternalMessageInfo +// Deprecated: Use Properties.ProtoReflect.Descriptor instead. +func (*Properties) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{44} +} -func (m *Properties) GetAdditionalProperties() []*NamedSchema { - if m != nil { - return m.AdditionalProperties +func (x *Properties) GetAdditionalProperties() []*NamedSchema { + if x != nil { + return x.AdditionalProperties } return nil } type QueryParameterSubSchema struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Determines whether or not this parameter is required or optional. Required bool `protobuf:"varint,1,opt,name=required,proto3" json:"required,omitempty"` // Determines the location of the parameter. @@ -3673,378 +4024,390 @@ type QueryParameterSubSchema struct { // The name of the parameter. 
Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` // allows sending a parameter by name only or with an empty value. - AllowEmptyValue bool `protobuf:"varint,5,opt,name=allow_empty_value,json=allowEmptyValue,proto3" json:"allow_empty_value,omitempty"` - Type string `protobuf:"bytes,6,opt,name=type,proto3" json:"type,omitempty"` - Format string `protobuf:"bytes,7,opt,name=format,proto3" json:"format,omitempty"` - Items *PrimitivesItems `protobuf:"bytes,8,opt,name=items,proto3" json:"items,omitempty"` - CollectionFormat string `protobuf:"bytes,9,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"` - Default *Any `protobuf:"bytes,10,opt,name=default,proto3" json:"default,omitempty"` - Maximum float64 `protobuf:"fixed64,11,opt,name=maximum,proto3" json:"maximum,omitempty"` - ExclusiveMaximum bool `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` - Minimum float64 `protobuf:"fixed64,13,opt,name=minimum,proto3" json:"minimum,omitempty"` - ExclusiveMinimum bool `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` - MaxLength int64 `protobuf:"varint,15,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` - MinLength int64 `protobuf:"varint,16,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` - Pattern string `protobuf:"bytes,17,opt,name=pattern,proto3" json:"pattern,omitempty"` - MaxItems int64 `protobuf:"varint,18,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` - MinItems int64 `protobuf:"varint,19,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` - UniqueItems bool `protobuf:"varint,20,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` - Enum []*Any `protobuf:"bytes,21,rep,name=enum,proto3" json:"enum,omitempty"` - MultipleOf float64 `protobuf:"fixed64,22,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,23,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *QueryParameterSubSchema) Reset() { *m = QueryParameterSubSchema{} } -func (m *QueryParameterSubSchema) String() string { return proto.CompactTextString(m) } -func (*QueryParameterSubSchema) ProtoMessage() {} + AllowEmptyValue bool `protobuf:"varint,5,opt,name=allow_empty_value,json=allowEmptyValue,proto3" json:"allow_empty_value,omitempty"` + Type string `protobuf:"bytes,6,opt,name=type,proto3" json:"type,omitempty"` + Format string `protobuf:"bytes,7,opt,name=format,proto3" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,8,opt,name=items,proto3" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,9,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,10,opt,name=default,proto3" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,11,opt,name=maximum,proto3" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,13,opt,name=minimum,proto3" json:"minimum,omitempty"` + ExclusiveMinimum bool 
`protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,15,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,16,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,17,opt,name=pattern,proto3" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,18,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,19,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,20,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,21,rep,name=enum,proto3" json:"enum,omitempty"` + MultipleOf float64 `protobuf:"fixed64,22,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,23,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` +} + +func (x *QueryParameterSubSchema) Reset() { + *x = QueryParameterSubSchema{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[45] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *QueryParameterSubSchema) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QueryParameterSubSchema) ProtoMessage() {} + +func (x *QueryParameterSubSchema) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[45] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use QueryParameterSubSchema.ProtoReflect.Descriptor instead. 
func (*QueryParameterSubSchema) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{45} + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{45} } -func (m *QueryParameterSubSchema) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_QueryParameterSubSchema.Unmarshal(m, b) -} -func (m *QueryParameterSubSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_QueryParameterSubSchema.Marshal(b, m, deterministic) -} -func (m *QueryParameterSubSchema) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryParameterSubSchema.Merge(m, src) -} -func (m *QueryParameterSubSchema) XXX_Size() int { - return xxx_messageInfo_QueryParameterSubSchema.Size(m) -} -func (m *QueryParameterSubSchema) XXX_DiscardUnknown() { - xxx_messageInfo_QueryParameterSubSchema.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryParameterSubSchema proto.InternalMessageInfo - -func (m *QueryParameterSubSchema) GetRequired() bool { - if m != nil { - return m.Required +func (x *QueryParameterSubSchema) GetRequired() bool { + if x != nil { + return x.Required } return false } -func (m *QueryParameterSubSchema) GetIn() string { - if m != nil { - return m.In +func (x *QueryParameterSubSchema) GetIn() string { + if x != nil { + return x.In } return "" } -func (m *QueryParameterSubSchema) GetDescription() string { - if m != nil { - return m.Description +func (x *QueryParameterSubSchema) GetDescription() string { + if x != nil { + return x.Description } return "" } -func (m *QueryParameterSubSchema) GetName() string { - if m != nil { - return m.Name +func (x *QueryParameterSubSchema) GetName() string { + if x != nil { + return x.Name } return "" } -func (m *QueryParameterSubSchema) GetAllowEmptyValue() bool { - if m != nil { - return m.AllowEmptyValue +func (x *QueryParameterSubSchema) GetAllowEmptyValue() bool { + if x != nil { + return x.AllowEmptyValue } return false } -func (m *QueryParameterSubSchema) GetType() string { - if m != nil { - return m.Type +func (x *QueryParameterSubSchema) GetType() string { + if x != nil { + return x.Type } return "" } -func (m *QueryParameterSubSchema) GetFormat() string { - if m != nil { - return m.Format +func (x *QueryParameterSubSchema) GetFormat() string { + if x != nil { + return x.Format } return "" } -func (m *QueryParameterSubSchema) GetItems() *PrimitivesItems { - if m != nil { - return m.Items +func (x *QueryParameterSubSchema) GetItems() *PrimitivesItems { + if x != nil { + return x.Items } return nil } -func (m *QueryParameterSubSchema) GetCollectionFormat() string { - if m != nil { - return m.CollectionFormat +func (x *QueryParameterSubSchema) GetCollectionFormat() string { + if x != nil { + return x.CollectionFormat } return "" } -func (m *QueryParameterSubSchema) GetDefault() *Any { - if m != nil { - return m.Default +func (x *QueryParameterSubSchema) GetDefault() *Any { + if x != nil { + return x.Default } return nil } -func (m *QueryParameterSubSchema) GetMaximum() float64 { - if m != nil { - return m.Maximum +func (x *QueryParameterSubSchema) GetMaximum() float64 { + if x != nil { + return x.Maximum } return 0 } -func (m *QueryParameterSubSchema) GetExclusiveMaximum() bool { - if m != nil { - return m.ExclusiveMaximum +func (x *QueryParameterSubSchema) GetExclusiveMaximum() bool { + if x != nil { + return x.ExclusiveMaximum } return false } -func (m *QueryParameterSubSchema) GetMinimum() float64 { - if m != nil { - return m.Minimum +func (x *QueryParameterSubSchema) GetMinimum() float64 { + if x != nil { + 
return x.Minimum } return 0 } -func (m *QueryParameterSubSchema) GetExclusiveMinimum() bool { - if m != nil { - return m.ExclusiveMinimum +func (x *QueryParameterSubSchema) GetExclusiveMinimum() bool { + if x != nil { + return x.ExclusiveMinimum } return false } -func (m *QueryParameterSubSchema) GetMaxLength() int64 { - if m != nil { - return m.MaxLength +func (x *QueryParameterSubSchema) GetMaxLength() int64 { + if x != nil { + return x.MaxLength } return 0 } -func (m *QueryParameterSubSchema) GetMinLength() int64 { - if m != nil { - return m.MinLength +func (x *QueryParameterSubSchema) GetMinLength() int64 { + if x != nil { + return x.MinLength } return 0 } -func (m *QueryParameterSubSchema) GetPattern() string { - if m != nil { - return m.Pattern +func (x *QueryParameterSubSchema) GetPattern() string { + if x != nil { + return x.Pattern } return "" } -func (m *QueryParameterSubSchema) GetMaxItems() int64 { - if m != nil { - return m.MaxItems +func (x *QueryParameterSubSchema) GetMaxItems() int64 { + if x != nil { + return x.MaxItems } return 0 } -func (m *QueryParameterSubSchema) GetMinItems() int64 { - if m != nil { - return m.MinItems +func (x *QueryParameterSubSchema) GetMinItems() int64 { + if x != nil { + return x.MinItems } return 0 } -func (m *QueryParameterSubSchema) GetUniqueItems() bool { - if m != nil { - return m.UniqueItems +func (x *QueryParameterSubSchema) GetUniqueItems() bool { + if x != nil { + return x.UniqueItems } return false } -func (m *QueryParameterSubSchema) GetEnum() []*Any { - if m != nil { - return m.Enum +func (x *QueryParameterSubSchema) GetEnum() []*Any { + if x != nil { + return x.Enum } return nil } -func (m *QueryParameterSubSchema) GetMultipleOf() float64 { - if m != nil { - return m.MultipleOf +func (x *QueryParameterSubSchema) GetMultipleOf() float64 { + if x != nil { + return x.MultipleOf } return 0 } -func (m *QueryParameterSubSchema) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension +func (x *QueryParameterSubSchema) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension } return nil } type Response struct { - Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` - Schema *SchemaItem `protobuf:"bytes,2,opt,name=schema,proto3" json:"schema,omitempty"` - Headers *Headers `protobuf:"bytes,3,opt,name=headers,proto3" json:"headers,omitempty"` - Examples *Examples `protobuf:"bytes,4,opt,name=examples,proto3" json:"examples,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,5,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Response) Reset() { *m = Response{} } -func (m *Response) String() string { return proto.CompactTextString(m) } -func (*Response) ProtoMessage() {} -func (*Response) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{46} -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *Response) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Response.Unmarshal(m, b) -} -func (m *Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Response.Marshal(b, m, deterministic) + Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` + Schema *SchemaItem `protobuf:"bytes,2,opt,name=schema,proto3" 
json:"schema,omitempty"` + Headers *Headers `protobuf:"bytes,3,opt,name=headers,proto3" json:"headers,omitempty"` + Examples *Examples `protobuf:"bytes,4,opt,name=examples,proto3" json:"examples,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,5,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` } -func (m *Response) XXX_Merge(src proto.Message) { - xxx_messageInfo_Response.Merge(m, src) + +func (x *Response) Reset() { + *x = Response{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[46] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Response) XXX_Size() int { - return xxx_messageInfo_Response.Size(m) + +func (x *Response) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Response) XXX_DiscardUnknown() { - xxx_messageInfo_Response.DiscardUnknown(m) + +func (*Response) ProtoMessage() {} + +func (x *Response) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[46] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Response proto.InternalMessageInfo +// Deprecated: Use Response.ProtoReflect.Descriptor instead. +func (*Response) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{46} +} -func (m *Response) GetDescription() string { - if m != nil { - return m.Description +func (x *Response) GetDescription() string { + if x != nil { + return x.Description } return "" } -func (m *Response) GetSchema() *SchemaItem { - if m != nil { - return m.Schema +func (x *Response) GetSchema() *SchemaItem { + if x != nil { + return x.Schema } return nil } -func (m *Response) GetHeaders() *Headers { - if m != nil { - return m.Headers +func (x *Response) GetHeaders() *Headers { + if x != nil { + return x.Headers } return nil } -func (m *Response) GetExamples() *Examples { - if m != nil { - return m.Examples +func (x *Response) GetExamples() *Examples { + if x != nil { + return x.Examples } return nil } -func (m *Response) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension +func (x *Response) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension } return nil } -// One or more JSON representations for parameters +// One or more JSON representations for responses type ResponseDefinitions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + AdditionalProperties []*NamedResponse `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` } -func (m *ResponseDefinitions) Reset() { *m = ResponseDefinitions{} } -func (m *ResponseDefinitions) String() string { return proto.CompactTextString(m) } -func (*ResponseDefinitions) ProtoMessage() {} -func (*ResponseDefinitions) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{47} +func (x *ResponseDefinitions) Reset() { + *x = ResponseDefinitions{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[47] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *ResponseDefinitions) XXX_Unmarshal(b 
[]byte) error { - return xxx_messageInfo_ResponseDefinitions.Unmarshal(m, b) -} -func (m *ResponseDefinitions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ResponseDefinitions.Marshal(b, m, deterministic) +func (x *ResponseDefinitions) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ResponseDefinitions) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseDefinitions.Merge(m, src) -} -func (m *ResponseDefinitions) XXX_Size() int { - return xxx_messageInfo_ResponseDefinitions.Size(m) -} -func (m *ResponseDefinitions) XXX_DiscardUnknown() { - xxx_messageInfo_ResponseDefinitions.DiscardUnknown(m) + +func (*ResponseDefinitions) ProtoMessage() {} + +func (x *ResponseDefinitions) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[47] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_ResponseDefinitions proto.InternalMessageInfo +// Deprecated: Use ResponseDefinitions.ProtoReflect.Descriptor instead. +func (*ResponseDefinitions) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{47} +} -func (m *ResponseDefinitions) GetAdditionalProperties() []*NamedResponse { - if m != nil { - return m.AdditionalProperties +func (x *ResponseDefinitions) GetAdditionalProperties() []*NamedResponse { + if x != nil { + return x.AdditionalProperties } return nil } type ResponseValue struct { - // Types that are valid to be assigned to Oneof: + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Oneof: // *ResponseValue_Response // *ResponseValue_JsonReference - Oneof isResponseValue_Oneof `protobuf_oneof:"oneof"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Oneof isResponseValue_Oneof `protobuf_oneof:"oneof"` } -func (m *ResponseValue) Reset() { *m = ResponseValue{} } -func (m *ResponseValue) String() string { return proto.CompactTextString(m) } -func (*ResponseValue) ProtoMessage() {} -func (*ResponseValue) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{48} +func (x *ResponseValue) Reset() { + *x = ResponseValue{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[48] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *ResponseValue) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ResponseValue.Unmarshal(m, b) -} -func (m *ResponseValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ResponseValue.Marshal(b, m, deterministic) -} -func (m *ResponseValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseValue.Merge(m, src) -} -func (m *ResponseValue) XXX_Size() int { - return xxx_messageInfo_ResponseValue.Size(m) -} -func (m *ResponseValue) XXX_DiscardUnknown() { - xxx_messageInfo_ResponseValue.DiscardUnknown(m) +func (x *ResponseValue) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_ResponseValue proto.InternalMessageInfo +func (*ResponseValue) ProtoMessage() {} -type isResponseValue_Oneof interface { - isResponseValue_Oneof() -} - -type ResponseValue_Response struct { - Response *Response `protobuf:"bytes,1,opt,name=response,proto3,oneof"` +func (x 
*ResponseValue) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[48] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -type ResponseValue_JsonReference struct { - JsonReference *JsonReference `protobuf:"bytes,2,opt,name=json_reference,json=jsonReference,proto3,oneof"` +// Deprecated: Use ResponseValue.ProtoReflect.Descriptor instead. +func (*ResponseValue) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{48} } -func (*ResponseValue_Response) isResponseValue_Oneof() {} - -func (*ResponseValue_JsonReference) isResponseValue_Oneof() {} - func (m *ResponseValue) GetOneof() isResponseValue_Oneof { if m != nil { return m.Oneof @@ -4052,78 +4415,98 @@ func (m *ResponseValue) GetOneof() isResponseValue_Oneof { return nil } -func (m *ResponseValue) GetResponse() *Response { - if x, ok := m.GetOneof().(*ResponseValue_Response); ok { +func (x *ResponseValue) GetResponse() *Response { + if x, ok := x.GetOneof().(*ResponseValue_Response); ok { return x.Response } return nil } -func (m *ResponseValue) GetJsonReference() *JsonReference { - if x, ok := m.GetOneof().(*ResponseValue_JsonReference); ok { +func (x *ResponseValue) GetJsonReference() *JsonReference { + if x, ok := x.GetOneof().(*ResponseValue_JsonReference); ok { return x.JsonReference } return nil } -// XXX_OneofWrappers is for the internal use of the proto package. -func (*ResponseValue) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*ResponseValue_Response)(nil), - (*ResponseValue_JsonReference)(nil), - } +type isResponseValue_Oneof interface { + isResponseValue_Oneof() } -// Response objects names can either be any valid HTTP status code or 'default'. -type Responses struct { - ResponseCode []*NamedResponseValue `protobuf:"bytes,1,rep,name=response_code,json=responseCode,proto3" json:"response_code,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,2,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +type ResponseValue_Response struct { + Response *Response `protobuf:"bytes,1,opt,name=response,proto3,oneof"` } -func (m *Responses) Reset() { *m = Responses{} } -func (m *Responses) String() string { return proto.CompactTextString(m) } -func (*Responses) ProtoMessage() {} -func (*Responses) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{49} +type ResponseValue_JsonReference struct { + JsonReference *JsonReference `protobuf:"bytes,2,opt,name=json_reference,json=jsonReference,proto3,oneof"` } -func (m *Responses) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Responses.Unmarshal(m, b) -} -func (m *Responses) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Responses.Marshal(b, m, deterministic) +func (*ResponseValue_Response) isResponseValue_Oneof() {} + +func (*ResponseValue_JsonReference) isResponseValue_Oneof() {} + +// Response objects names can either be any valid HTTP status code or 'default'. 
+type Responses struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ResponseCode []*NamedResponseValue `protobuf:"bytes,1,rep,name=response_code,json=responseCode,proto3" json:"response_code,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,2,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` } -func (m *Responses) XXX_Merge(src proto.Message) { - xxx_messageInfo_Responses.Merge(m, src) + +func (x *Responses) Reset() { + *x = Responses{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[49] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Responses) XXX_Size() int { - return xxx_messageInfo_Responses.Size(m) + +func (x *Responses) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Responses) XXX_DiscardUnknown() { - xxx_messageInfo_Responses.DiscardUnknown(m) + +func (*Responses) ProtoMessage() {} + +func (x *Responses) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[49] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Responses proto.InternalMessageInfo +// Deprecated: Use Responses.ProtoReflect.Descriptor instead. +func (*Responses) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{49} +} -func (m *Responses) GetResponseCode() []*NamedResponseValue { - if m != nil { - return m.ResponseCode +func (x *Responses) GetResponseCode() []*NamedResponseValue { + if x != nil { + return x.ResponseCode } return nil } -func (m *Responses) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension +func (x *Responses) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension } return nil } // A deterministic version of a JSON Schema object. 
type Schema struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref,proto3" json:"_ref,omitempty"` Format string `protobuf:"bytes,2,opt,name=format,proto3" json:"format,omitempty"` Title string `protobuf:"bytes,3,opt,name=title,proto3" json:"title,omitempty"` @@ -4155,304 +4538,300 @@ type Schema struct { ExternalDocs *ExternalDocs `protobuf:"bytes,29,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` Example *Any `protobuf:"bytes,30,opt,name=example,proto3" json:"example,omitempty"` VendorExtension []*NamedAny `protobuf:"bytes,31,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` } -func (m *Schema) Reset() { *m = Schema{} } -func (m *Schema) String() string { return proto.CompactTextString(m) } -func (*Schema) ProtoMessage() {} -func (*Schema) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{50} +func (x *Schema) Reset() { + *x = Schema{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[50] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Schema) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Schema.Unmarshal(m, b) -} -func (m *Schema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Schema.Marshal(b, m, deterministic) -} -func (m *Schema) XXX_Merge(src proto.Message) { - xxx_messageInfo_Schema.Merge(m, src) -} -func (m *Schema) XXX_Size() int { - return xxx_messageInfo_Schema.Size(m) +func (x *Schema) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Schema) XXX_DiscardUnknown() { - xxx_messageInfo_Schema.DiscardUnknown(m) + +func (*Schema) ProtoMessage() {} + +func (x *Schema) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[50] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Schema proto.InternalMessageInfo +// Deprecated: Use Schema.ProtoReflect.Descriptor instead. 
+func (*Schema) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{50} +} -func (m *Schema) GetXRef() string { - if m != nil { - return m.XRef +func (x *Schema) GetXRef() string { + if x != nil { + return x.XRef } return "" } -func (m *Schema) GetFormat() string { - if m != nil { - return m.Format +func (x *Schema) GetFormat() string { + if x != nil { + return x.Format } return "" } -func (m *Schema) GetTitle() string { - if m != nil { - return m.Title +func (x *Schema) GetTitle() string { + if x != nil { + return x.Title } return "" } -func (m *Schema) GetDescription() string { - if m != nil { - return m.Description +func (x *Schema) GetDescription() string { + if x != nil { + return x.Description } return "" } -func (m *Schema) GetDefault() *Any { - if m != nil { - return m.Default +func (x *Schema) GetDefault() *Any { + if x != nil { + return x.Default } return nil } -func (m *Schema) GetMultipleOf() float64 { - if m != nil { - return m.MultipleOf +func (x *Schema) GetMultipleOf() float64 { + if x != nil { + return x.MultipleOf } return 0 } -func (m *Schema) GetMaximum() float64 { - if m != nil { - return m.Maximum +func (x *Schema) GetMaximum() float64 { + if x != nil { + return x.Maximum } return 0 } -func (m *Schema) GetExclusiveMaximum() bool { - if m != nil { - return m.ExclusiveMaximum +func (x *Schema) GetExclusiveMaximum() bool { + if x != nil { + return x.ExclusiveMaximum } return false } -func (m *Schema) GetMinimum() float64 { - if m != nil { - return m.Minimum +func (x *Schema) GetMinimum() float64 { + if x != nil { + return x.Minimum } return 0 } -func (m *Schema) GetExclusiveMinimum() bool { - if m != nil { - return m.ExclusiveMinimum +func (x *Schema) GetExclusiveMinimum() bool { + if x != nil { + return x.ExclusiveMinimum } return false } -func (m *Schema) GetMaxLength() int64 { - if m != nil { - return m.MaxLength +func (x *Schema) GetMaxLength() int64 { + if x != nil { + return x.MaxLength } return 0 } -func (m *Schema) GetMinLength() int64 { - if m != nil { - return m.MinLength +func (x *Schema) GetMinLength() int64 { + if x != nil { + return x.MinLength } return 0 } -func (m *Schema) GetPattern() string { - if m != nil { - return m.Pattern +func (x *Schema) GetPattern() string { + if x != nil { + return x.Pattern } return "" } -func (m *Schema) GetMaxItems() int64 { - if m != nil { - return m.MaxItems +func (x *Schema) GetMaxItems() int64 { + if x != nil { + return x.MaxItems } return 0 } -func (m *Schema) GetMinItems() int64 { - if m != nil { - return m.MinItems +func (x *Schema) GetMinItems() int64 { + if x != nil { + return x.MinItems } return 0 } -func (m *Schema) GetUniqueItems() bool { - if m != nil { - return m.UniqueItems +func (x *Schema) GetUniqueItems() bool { + if x != nil { + return x.UniqueItems } return false } -func (m *Schema) GetMaxProperties() int64 { - if m != nil { - return m.MaxProperties +func (x *Schema) GetMaxProperties() int64 { + if x != nil { + return x.MaxProperties } return 0 } -func (m *Schema) GetMinProperties() int64 { - if m != nil { - return m.MinProperties +func (x *Schema) GetMinProperties() int64 { + if x != nil { + return x.MinProperties } return 0 } -func (m *Schema) GetRequired() []string { - if m != nil { - return m.Required +func (x *Schema) GetRequired() []string { + if x != nil { + return x.Required } return nil } -func (m *Schema) GetEnum() []*Any { - if m != nil { - return m.Enum +func (x *Schema) GetEnum() []*Any { + if x != nil { + return x.Enum } return nil } -func (m *Schema) 
GetAdditionalProperties() *AdditionalPropertiesItem { - if m != nil { - return m.AdditionalProperties +func (x *Schema) GetAdditionalProperties() *AdditionalPropertiesItem { + if x != nil { + return x.AdditionalProperties } return nil } -func (m *Schema) GetType() *TypeItem { - if m != nil { - return m.Type +func (x *Schema) GetType() *TypeItem { + if x != nil { + return x.Type } return nil } -func (m *Schema) GetItems() *ItemsItem { - if m != nil { - return m.Items +func (x *Schema) GetItems() *ItemsItem { + if x != nil { + return x.Items } return nil } -func (m *Schema) GetAllOf() []*Schema { - if m != nil { - return m.AllOf +func (x *Schema) GetAllOf() []*Schema { + if x != nil { + return x.AllOf } return nil } -func (m *Schema) GetProperties() *Properties { - if m != nil { - return m.Properties +func (x *Schema) GetProperties() *Properties { + if x != nil { + return x.Properties } return nil } -func (m *Schema) GetDiscriminator() string { - if m != nil { - return m.Discriminator +func (x *Schema) GetDiscriminator() string { + if x != nil { + return x.Discriminator } return "" } -func (m *Schema) GetReadOnly() bool { - if m != nil { - return m.ReadOnly +func (x *Schema) GetReadOnly() bool { + if x != nil { + return x.ReadOnly } return false } -func (m *Schema) GetXml() *Xml { - if m != nil { - return m.Xml +func (x *Schema) GetXml() *Xml { + if x != nil { + return x.Xml } return nil } -func (m *Schema) GetExternalDocs() *ExternalDocs { - if m != nil { - return m.ExternalDocs +func (x *Schema) GetExternalDocs() *ExternalDocs { + if x != nil { + return x.ExternalDocs } return nil } -func (m *Schema) GetExample() *Any { - if m != nil { - return m.Example +func (x *Schema) GetExample() *Any { + if x != nil { + return x.Example } return nil } -func (m *Schema) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension +func (x *Schema) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension } return nil } type SchemaItem struct { - // Types that are valid to be assigned to Oneof: + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Oneof: // *SchemaItem_Schema // *SchemaItem_FileSchema - Oneof isSchemaItem_Oneof `protobuf_oneof:"oneof"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Oneof isSchemaItem_Oneof `protobuf_oneof:"oneof"` } -func (m *SchemaItem) Reset() { *m = SchemaItem{} } -func (m *SchemaItem) String() string { return proto.CompactTextString(m) } -func (*SchemaItem) ProtoMessage() {} -func (*SchemaItem) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{51} +func (x *SchemaItem) Reset() { + *x = SchemaItem{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[51] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *SchemaItem) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SchemaItem.Unmarshal(m, b) -} -func (m *SchemaItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SchemaItem.Marshal(b, m, deterministic) -} -func (m *SchemaItem) XXX_Merge(src proto.Message) { - xxx_messageInfo_SchemaItem.Merge(m, src) +func (x *SchemaItem) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *SchemaItem) XXX_Size() int { - return xxx_messageInfo_SchemaItem.Size(m) -} -func (m *SchemaItem) XXX_DiscardUnknown() { - 
xxx_messageInfo_SchemaItem.DiscardUnknown(m) -} - -var xxx_messageInfo_SchemaItem proto.InternalMessageInfo -type isSchemaItem_Oneof interface { - isSchemaItem_Oneof() -} +func (*SchemaItem) ProtoMessage() {} -type SchemaItem_Schema struct { - Schema *Schema `protobuf:"bytes,1,opt,name=schema,proto3,oneof"` +func (x *SchemaItem) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[51] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -type SchemaItem_FileSchema struct { - FileSchema *FileSchema `protobuf:"bytes,2,opt,name=file_schema,json=fileSchema,proto3,oneof"` +// Deprecated: Use SchemaItem.ProtoReflect.Descriptor instead. +func (*SchemaItem) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{51} } -func (*SchemaItem_Schema) isSchemaItem_Oneof() {} - -func (*SchemaItem_FileSchema) isSchemaItem_Oneof() {} - func (m *SchemaItem) GetOneof() isSchemaItem_Oneof { if m != nil { return m.Oneof @@ -4460,105 +4839,178 @@ func (m *SchemaItem) GetOneof() isSchemaItem_Oneof { return nil } -func (m *SchemaItem) GetSchema() *Schema { - if x, ok := m.GetOneof().(*SchemaItem_Schema); ok { +func (x *SchemaItem) GetSchema() *Schema { + if x, ok := x.GetOneof().(*SchemaItem_Schema); ok { return x.Schema } return nil } -func (m *SchemaItem) GetFileSchema() *FileSchema { - if x, ok := m.GetOneof().(*SchemaItem_FileSchema); ok { +func (x *SchemaItem) GetFileSchema() *FileSchema { + if x, ok := x.GetOneof().(*SchemaItem_FileSchema); ok { return x.FileSchema } return nil } -// XXX_OneofWrappers is for the internal use of the proto package. 
-func (*SchemaItem) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*SchemaItem_Schema)(nil), - (*SchemaItem_FileSchema)(nil), - } +type isSchemaItem_Oneof interface { + isSchemaItem_Oneof() } -type SecurityDefinitions struct { - AdditionalProperties []*NamedSecurityDefinitionsItem `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +type SchemaItem_Schema struct { + Schema *Schema `protobuf:"bytes,1,opt,name=schema,proto3,oneof"` } -func (m *SecurityDefinitions) Reset() { *m = SecurityDefinitions{} } -func (m *SecurityDefinitions) String() string { return proto.CompactTextString(m) } -func (*SecurityDefinitions) ProtoMessage() {} -func (*SecurityDefinitions) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{52} +type SchemaItem_FileSchema struct { + FileSchema *FileSchema `protobuf:"bytes,2,opt,name=file_schema,json=fileSchema,proto3,oneof"` } -func (m *SecurityDefinitions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SecurityDefinitions.Unmarshal(m, b) -} -func (m *SecurityDefinitions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SecurityDefinitions.Marshal(b, m, deterministic) +func (*SchemaItem_Schema) isSchemaItem_Oneof() {} + +func (*SchemaItem_FileSchema) isSchemaItem_Oneof() {} + +type SecurityDefinitions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AdditionalProperties []*NamedSecurityDefinitionsItem `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` } -func (m *SecurityDefinitions) XXX_Merge(src proto.Message) { - xxx_messageInfo_SecurityDefinitions.Merge(m, src) + +func (x *SecurityDefinitions) Reset() { + *x = SecurityDefinitions{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[52] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *SecurityDefinitions) XXX_Size() int { - return xxx_messageInfo_SecurityDefinitions.Size(m) + +func (x *SecurityDefinitions) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *SecurityDefinitions) XXX_DiscardUnknown() { - xxx_messageInfo_SecurityDefinitions.DiscardUnknown(m) + +func (*SecurityDefinitions) ProtoMessage() {} + +func (x *SecurityDefinitions) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[52] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_SecurityDefinitions proto.InternalMessageInfo +// Deprecated: Use SecurityDefinitions.ProtoReflect.Descriptor instead. 
+func (*SecurityDefinitions) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{52} +} -func (m *SecurityDefinitions) GetAdditionalProperties() []*NamedSecurityDefinitionsItem { - if m != nil { - return m.AdditionalProperties +func (x *SecurityDefinitions) GetAdditionalProperties() []*NamedSecurityDefinitionsItem { + if x != nil { + return x.AdditionalProperties } return nil } type SecurityDefinitionsItem struct { - // Types that are valid to be assigned to Oneof: + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Oneof: // *SecurityDefinitionsItem_BasicAuthenticationSecurity // *SecurityDefinitionsItem_ApiKeySecurity // *SecurityDefinitionsItem_Oauth2ImplicitSecurity // *SecurityDefinitionsItem_Oauth2PasswordSecurity // *SecurityDefinitionsItem_Oauth2ApplicationSecurity // *SecurityDefinitionsItem_Oauth2AccessCodeSecurity - Oneof isSecurityDefinitionsItem_Oneof `protobuf_oneof:"oneof"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Oneof isSecurityDefinitionsItem_Oneof `protobuf_oneof:"oneof"` } -func (m *SecurityDefinitionsItem) Reset() { *m = SecurityDefinitionsItem{} } -func (m *SecurityDefinitionsItem) String() string { return proto.CompactTextString(m) } -func (*SecurityDefinitionsItem) ProtoMessage() {} +func (x *SecurityDefinitionsItem) Reset() { + *x = SecurityDefinitionsItem{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[53] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SecurityDefinitionsItem) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SecurityDefinitionsItem) ProtoMessage() {} + +func (x *SecurityDefinitionsItem) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[53] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SecurityDefinitionsItem.ProtoReflect.Descriptor instead. 
func (*SecurityDefinitionsItem) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{53} + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{53} } -func (m *SecurityDefinitionsItem) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SecurityDefinitionsItem.Unmarshal(m, b) +func (m *SecurityDefinitionsItem) GetOneof() isSecurityDefinitionsItem_Oneof { + if m != nil { + return m.Oneof + } + return nil } -func (m *SecurityDefinitionsItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SecurityDefinitionsItem.Marshal(b, m, deterministic) + +func (x *SecurityDefinitionsItem) GetBasicAuthenticationSecurity() *BasicAuthenticationSecurity { + if x, ok := x.GetOneof().(*SecurityDefinitionsItem_BasicAuthenticationSecurity); ok { + return x.BasicAuthenticationSecurity + } + return nil +} + +func (x *SecurityDefinitionsItem) GetApiKeySecurity() *ApiKeySecurity { + if x, ok := x.GetOneof().(*SecurityDefinitionsItem_ApiKeySecurity); ok { + return x.ApiKeySecurity + } + return nil } -func (m *SecurityDefinitionsItem) XXX_Merge(src proto.Message) { - xxx_messageInfo_SecurityDefinitionsItem.Merge(m, src) + +func (x *SecurityDefinitionsItem) GetOauth2ImplicitSecurity() *Oauth2ImplicitSecurity { + if x, ok := x.GetOneof().(*SecurityDefinitionsItem_Oauth2ImplicitSecurity); ok { + return x.Oauth2ImplicitSecurity + } + return nil } -func (m *SecurityDefinitionsItem) XXX_Size() int { - return xxx_messageInfo_SecurityDefinitionsItem.Size(m) + +func (x *SecurityDefinitionsItem) GetOauth2PasswordSecurity() *Oauth2PasswordSecurity { + if x, ok := x.GetOneof().(*SecurityDefinitionsItem_Oauth2PasswordSecurity); ok { + return x.Oauth2PasswordSecurity + } + return nil } -func (m *SecurityDefinitionsItem) XXX_DiscardUnknown() { - xxx_messageInfo_SecurityDefinitionsItem.DiscardUnknown(m) + +func (x *SecurityDefinitionsItem) GetOauth2ApplicationSecurity() *Oauth2ApplicationSecurity { + if x, ok := x.GetOneof().(*SecurityDefinitionsItem_Oauth2ApplicationSecurity); ok { + return x.Oauth2ApplicationSecurity + } + return nil } -var xxx_messageInfo_SecurityDefinitionsItem proto.InternalMessageInfo +func (x *SecurityDefinitionsItem) GetOauth2AccessCodeSecurity() *Oauth2AccessCodeSecurity { + if x, ok := x.GetOneof().(*SecurityDefinitionsItem_Oauth2AccessCodeSecurity); ok { + return x.Oauth2AccessCodeSecurity + } + return nil +} type isSecurityDefinitionsItem_Oneof interface { isSecurityDefinitionsItem_Oneof() @@ -4600,627 +5052,2296 @@ func (*SecurityDefinitionsItem_Oauth2ApplicationSecurity) isSecurityDefinitionsI func (*SecurityDefinitionsItem_Oauth2AccessCodeSecurity) isSecurityDefinitionsItem_Oneof() {} -func (m *SecurityDefinitionsItem) GetOneof() isSecurityDefinitionsItem_Oneof { - if m != nil { - return m.Oneof - } - return nil +type SecurityRequirement struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AdditionalProperties []*NamedStringArray `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` } -func (m *SecurityDefinitionsItem) GetBasicAuthenticationSecurity() *BasicAuthenticationSecurity { - if x, ok := m.GetOneof().(*SecurityDefinitionsItem_BasicAuthenticationSecurity); ok { - return x.BasicAuthenticationSecurity +func (x *SecurityRequirement) Reset() { + *x = SecurityRequirement{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[54] + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return nil } -func (m *SecurityDefinitionsItem) GetApiKeySecurity() *ApiKeySecurity { - if x, ok := m.GetOneof().(*SecurityDefinitionsItem_ApiKeySecurity); ok { - return x.ApiKeySecurity - } - return nil +func (x *SecurityRequirement) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *SecurityDefinitionsItem) GetOauth2ImplicitSecurity() *Oauth2ImplicitSecurity { - if x, ok := m.GetOneof().(*SecurityDefinitionsItem_Oauth2ImplicitSecurity); ok { - return x.Oauth2ImplicitSecurity +func (*SecurityRequirement) ProtoMessage() {} + +func (x *SecurityRequirement) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[54] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return nil + return mi.MessageOf(x) } -func (m *SecurityDefinitionsItem) GetOauth2PasswordSecurity() *Oauth2PasswordSecurity { - if x, ok := m.GetOneof().(*SecurityDefinitionsItem_Oauth2PasswordSecurity); ok { - return x.Oauth2PasswordSecurity - } - return nil +// Deprecated: Use SecurityRequirement.ProtoReflect.Descriptor instead. +func (*SecurityRequirement) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{54} } -func (m *SecurityDefinitionsItem) GetOauth2ApplicationSecurity() *Oauth2ApplicationSecurity { - if x, ok := m.GetOneof().(*SecurityDefinitionsItem_Oauth2ApplicationSecurity); ok { - return x.Oauth2ApplicationSecurity +func (x *SecurityRequirement) GetAdditionalProperties() []*NamedStringArray { + if x != nil { + return x.AdditionalProperties } return nil } -func (m *SecurityDefinitionsItem) GetOauth2AccessCodeSecurity() *Oauth2AccessCodeSecurity { - if x, ok := m.GetOneof().(*SecurityDefinitionsItem_Oauth2AccessCodeSecurity); ok { - return x.Oauth2AccessCodeSecurity - } - return nil +type StringArray struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Value []string `protobuf:"bytes,1,rep,name=value,proto3" json:"value,omitempty"` } -// XXX_OneofWrappers is for the internal use of the proto package. 
-func (*SecurityDefinitionsItem) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*SecurityDefinitionsItem_BasicAuthenticationSecurity)(nil), - (*SecurityDefinitionsItem_ApiKeySecurity)(nil), - (*SecurityDefinitionsItem_Oauth2ImplicitSecurity)(nil), - (*SecurityDefinitionsItem_Oauth2PasswordSecurity)(nil), - (*SecurityDefinitionsItem_Oauth2ApplicationSecurity)(nil), - (*SecurityDefinitionsItem_Oauth2AccessCodeSecurity)(nil), +func (x *StringArray) Reset() { + *x = StringArray{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[55] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -type SecurityRequirement struct { - AdditionalProperties []*NamedStringArray `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (x *StringArray) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *SecurityRequirement) Reset() { *m = SecurityRequirement{} } -func (m *SecurityRequirement) String() string { return proto.CompactTextString(m) } -func (*SecurityRequirement) ProtoMessage() {} -func (*SecurityRequirement) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{54} -} +func (*StringArray) ProtoMessage() {} -func (m *SecurityRequirement) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SecurityRequirement.Unmarshal(m, b) -} -func (m *SecurityRequirement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SecurityRequirement.Marshal(b, m, deterministic) -} -func (m *SecurityRequirement) XXX_Merge(src proto.Message) { - xxx_messageInfo_SecurityRequirement.Merge(m, src) -} -func (m *SecurityRequirement) XXX_Size() int { - return xxx_messageInfo_SecurityRequirement.Size(m) -} -func (m *SecurityRequirement) XXX_DiscardUnknown() { - xxx_messageInfo_SecurityRequirement.DiscardUnknown(m) +func (x *StringArray) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[55] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_SecurityRequirement proto.InternalMessageInfo +// Deprecated: Use StringArray.ProtoReflect.Descriptor instead. 
+func (*StringArray) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{55} +} -func (m *SecurityRequirement) GetAdditionalProperties() []*NamedStringArray { - if m != nil { - return m.AdditionalProperties +func (x *StringArray) GetValue() []string { + if x != nil { + return x.Value } return nil } -type StringArray struct { - Value []string `protobuf:"bytes,1,rep,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} +type Tag struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *StringArray) Reset() { *m = StringArray{} } -func (m *StringArray) String() string { return proto.CompactTextString(m) } -func (*StringArray) ProtoMessage() {} -func (*StringArray) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{55} + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + ExternalDocs *ExternalDocs `protobuf:"bytes,3,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,4,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` } -func (m *StringArray) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StringArray.Unmarshal(m, b) -} -func (m *StringArray) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StringArray.Marshal(b, m, deterministic) -} -func (m *StringArray) XXX_Merge(src proto.Message) { - xxx_messageInfo_StringArray.Merge(m, src) -} -func (m *StringArray) XXX_Size() int { - return xxx_messageInfo_StringArray.Size(m) +func (x *Tag) Reset() { + *x = Tag{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[56] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *StringArray) XXX_DiscardUnknown() { - xxx_messageInfo_StringArray.DiscardUnknown(m) + +func (x *Tag) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_StringArray proto.InternalMessageInfo +func (*Tag) ProtoMessage() {} -func (m *StringArray) GetValue() []string { - if m != nil { - return m.Value +func (x *Tag) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[56] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return nil + return mi.MessageOf(x) } -type Tag struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` - ExternalDocs *ExternalDocs `protobuf:"bytes,3,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,4,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Tag) Reset() { *m = Tag{} } -func (m *Tag) String() string { return proto.CompactTextString(m) } -func (*Tag) ProtoMessage() {} +// Deprecated: Use Tag.ProtoReflect.Descriptor instead. 
func (*Tag) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{56} + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{56} } -func (m *Tag) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Tag.Unmarshal(m, b) -} -func (m *Tag) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Tag.Marshal(b, m, deterministic) -} -func (m *Tag) XXX_Merge(src proto.Message) { - xxx_messageInfo_Tag.Merge(m, src) -} -func (m *Tag) XXX_Size() int { - return xxx_messageInfo_Tag.Size(m) -} -func (m *Tag) XXX_DiscardUnknown() { - xxx_messageInfo_Tag.DiscardUnknown(m) -} - -var xxx_messageInfo_Tag proto.InternalMessageInfo - -func (m *Tag) GetName() string { - if m != nil { - return m.Name +func (x *Tag) GetName() string { + if x != nil { + return x.Name } return "" } -func (m *Tag) GetDescription() string { - if m != nil { - return m.Description +func (x *Tag) GetDescription() string { + if x != nil { + return x.Description } return "" } -func (m *Tag) GetExternalDocs() *ExternalDocs { - if m != nil { - return m.ExternalDocs +func (x *Tag) GetExternalDocs() *ExternalDocs { + if x != nil { + return x.ExternalDocs } return nil } -func (m *Tag) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension +func (x *Tag) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension } return nil } type TypeItem struct { - Value []string `protobuf:"bytes,1,rep,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *TypeItem) Reset() { *m = TypeItem{} } -func (m *TypeItem) String() string { return proto.CompactTextString(m) } -func (*TypeItem) ProtoMessage() {} -func (*TypeItem) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{57} + Value []string `protobuf:"bytes,1,rep,name=value,proto3" json:"value,omitempty"` } -func (m *TypeItem) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_TypeItem.Unmarshal(m, b) -} -func (m *TypeItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_TypeItem.Marshal(b, m, deterministic) -} -func (m *TypeItem) XXX_Merge(src proto.Message) { - xxx_messageInfo_TypeItem.Merge(m, src) +func (x *TypeItem) Reset() { + *x = TypeItem{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[57] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *TypeItem) XXX_Size() int { - return xxx_messageInfo_TypeItem.Size(m) + +func (x *TypeItem) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *TypeItem) XXX_DiscardUnknown() { - xxx_messageInfo_TypeItem.DiscardUnknown(m) + +func (*TypeItem) ProtoMessage() {} + +func (x *TypeItem) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[57] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_TypeItem proto.InternalMessageInfo +// Deprecated: Use TypeItem.ProtoReflect.Descriptor instead. 
+func (*TypeItem) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{57} +} -func (m *TypeItem) GetValue() []string { - if m != nil { - return m.Value +func (x *TypeItem) GetValue() []string { + if x != nil { + return x.Value } return nil } // Any property starting with x- is valid. type VendorExtension struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` } -func (m *VendorExtension) Reset() { *m = VendorExtension{} } -func (m *VendorExtension) String() string { return proto.CompactTextString(m) } -func (*VendorExtension) ProtoMessage() {} -func (*VendorExtension) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{58} +func (x *VendorExtension) Reset() { + *x = VendorExtension{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[58] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *VendorExtension) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_VendorExtension.Unmarshal(m, b) -} -func (m *VendorExtension) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_VendorExtension.Marshal(b, m, deterministic) +func (x *VendorExtension) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *VendorExtension) XXX_Merge(src proto.Message) { - xxx_messageInfo_VendorExtension.Merge(m, src) -} -func (m *VendorExtension) XXX_Size() int { - return xxx_messageInfo_VendorExtension.Size(m) -} -func (m *VendorExtension) XXX_DiscardUnknown() { - xxx_messageInfo_VendorExtension.DiscardUnknown(m) + +func (*VendorExtension) ProtoMessage() {} + +func (x *VendorExtension) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[58] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_VendorExtension proto.InternalMessageInfo +// Deprecated: Use VendorExtension.ProtoReflect.Descriptor instead. 
+func (*VendorExtension) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{58} +} -func (m *VendorExtension) GetAdditionalProperties() []*NamedAny { - if m != nil { - return m.AdditionalProperties +func (x *VendorExtension) GetAdditionalProperties() []*NamedAny { + if x != nil { + return x.AdditionalProperties } return nil } type Xml struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` - Prefix string `protobuf:"bytes,3,opt,name=prefix,proto3" json:"prefix,omitempty"` - Attribute bool `protobuf:"varint,4,opt,name=attribute,proto3" json:"attribute,omitempty"` - Wrapped bool `protobuf:"varint,5,opt,name=wrapped,proto3" json:"wrapped,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Xml) Reset() { *m = Xml{} } -func (m *Xml) String() string { return proto.CompactTextString(m) } -func (*Xml) ProtoMessage() {} -func (*Xml) Descriptor() ([]byte, []int) { - return fileDescriptor_336adc04ae589d92, []int{59} -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *Xml) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Xml.Unmarshal(m, b) -} -func (m *Xml) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Xml.Marshal(b, m, deterministic) + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` + Prefix string `protobuf:"bytes,3,opt,name=prefix,proto3" json:"prefix,omitempty"` + Attribute bool `protobuf:"varint,4,opt,name=attribute,proto3" json:"attribute,omitempty"` + Wrapped bool `protobuf:"varint,5,opt,name=wrapped,proto3" json:"wrapped,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` } -func (m *Xml) XXX_Merge(src proto.Message) { - xxx_messageInfo_Xml.Merge(m, src) + +func (x *Xml) Reset() { + *x = Xml{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[59] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Xml) XXX_Size() int { - return xxx_messageInfo_Xml.Size(m) + +func (x *Xml) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Xml) XXX_DiscardUnknown() { - xxx_messageInfo_Xml.DiscardUnknown(m) + +func (*Xml) ProtoMessage() {} + +func (x *Xml) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[59] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Xml proto.InternalMessageInfo +// Deprecated: Use Xml.ProtoReflect.Descriptor instead. 
+func (*Xml) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{59} +} -func (m *Xml) GetName() string { - if m != nil { - return m.Name +func (x *Xml) GetName() string { + if x != nil { + return x.Name } return "" } -func (m *Xml) GetNamespace() string { - if m != nil { - return m.Namespace +func (x *Xml) GetNamespace() string { + if x != nil { + return x.Namespace } return "" } -func (m *Xml) GetPrefix() string { - if m != nil { - return m.Prefix +func (x *Xml) GetPrefix() string { + if x != nil { + return x.Prefix } return "" } -func (m *Xml) GetAttribute() bool { - if m != nil { - return m.Attribute +func (x *Xml) GetAttribute() bool { + if x != nil { + return x.Attribute } return false } -func (m *Xml) GetWrapped() bool { - if m != nil { - return m.Wrapped +func (x *Xml) GetWrapped() bool { + if x != nil { + return x.Wrapped } return false } -func (m *Xml) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -func init() { - proto.RegisterType((*AdditionalPropertiesItem)(nil), "openapi.v2.AdditionalPropertiesItem") - proto.RegisterType((*Any)(nil), "openapi.v2.Any") - proto.RegisterType((*ApiKeySecurity)(nil), "openapi.v2.ApiKeySecurity") - proto.RegisterType((*BasicAuthenticationSecurity)(nil), "openapi.v2.BasicAuthenticationSecurity") - proto.RegisterType((*BodyParameter)(nil), "openapi.v2.BodyParameter") - proto.RegisterType((*Contact)(nil), "openapi.v2.Contact") - proto.RegisterType((*Default)(nil), "openapi.v2.Default") - proto.RegisterType((*Definitions)(nil), "openapi.v2.Definitions") - proto.RegisterType((*Document)(nil), "openapi.v2.Document") - proto.RegisterType((*Examples)(nil), "openapi.v2.Examples") - proto.RegisterType((*ExternalDocs)(nil), "openapi.v2.ExternalDocs") - proto.RegisterType((*FileSchema)(nil), "openapi.v2.FileSchema") - proto.RegisterType((*FormDataParameterSubSchema)(nil), "openapi.v2.FormDataParameterSubSchema") - proto.RegisterType((*Header)(nil), "openapi.v2.Header") - proto.RegisterType((*HeaderParameterSubSchema)(nil), "openapi.v2.HeaderParameterSubSchema") - proto.RegisterType((*Headers)(nil), "openapi.v2.Headers") - proto.RegisterType((*Info)(nil), "openapi.v2.Info") - proto.RegisterType((*ItemsItem)(nil), "openapi.v2.ItemsItem") - proto.RegisterType((*JsonReference)(nil), "openapi.v2.JsonReference") - proto.RegisterType((*License)(nil), "openapi.v2.License") - proto.RegisterType((*NamedAny)(nil), "openapi.v2.NamedAny") - proto.RegisterType((*NamedHeader)(nil), "openapi.v2.NamedHeader") - proto.RegisterType((*NamedParameter)(nil), "openapi.v2.NamedParameter") - proto.RegisterType((*NamedPathItem)(nil), "openapi.v2.NamedPathItem") - proto.RegisterType((*NamedResponse)(nil), "openapi.v2.NamedResponse") - proto.RegisterType((*NamedResponseValue)(nil), "openapi.v2.NamedResponseValue") - proto.RegisterType((*NamedSchema)(nil), "openapi.v2.NamedSchema") - proto.RegisterType((*NamedSecurityDefinitionsItem)(nil), "openapi.v2.NamedSecurityDefinitionsItem") - proto.RegisterType((*NamedString)(nil), "openapi.v2.NamedString") - proto.RegisterType((*NamedStringArray)(nil), "openapi.v2.NamedStringArray") - proto.RegisterType((*NonBodyParameter)(nil), "openapi.v2.NonBodyParameter") - proto.RegisterType((*Oauth2AccessCodeSecurity)(nil), "openapi.v2.Oauth2AccessCodeSecurity") - proto.RegisterType((*Oauth2ApplicationSecurity)(nil), "openapi.v2.Oauth2ApplicationSecurity") - proto.RegisterType((*Oauth2ImplicitSecurity)(nil), "openapi.v2.Oauth2ImplicitSecurity") - 
proto.RegisterType((*Oauth2PasswordSecurity)(nil), "openapi.v2.Oauth2PasswordSecurity") - proto.RegisterType((*Oauth2Scopes)(nil), "openapi.v2.Oauth2Scopes") - proto.RegisterType((*Operation)(nil), "openapi.v2.Operation") - proto.RegisterType((*Parameter)(nil), "openapi.v2.Parameter") - proto.RegisterType((*ParameterDefinitions)(nil), "openapi.v2.ParameterDefinitions") - proto.RegisterType((*ParametersItem)(nil), "openapi.v2.ParametersItem") - proto.RegisterType((*PathItem)(nil), "openapi.v2.PathItem") - proto.RegisterType((*PathParameterSubSchema)(nil), "openapi.v2.PathParameterSubSchema") - proto.RegisterType((*Paths)(nil), "openapi.v2.Paths") - proto.RegisterType((*PrimitivesItems)(nil), "openapi.v2.PrimitivesItems") - proto.RegisterType((*Properties)(nil), "openapi.v2.Properties") - proto.RegisterType((*QueryParameterSubSchema)(nil), "openapi.v2.QueryParameterSubSchema") - proto.RegisterType((*Response)(nil), "openapi.v2.Response") - proto.RegisterType((*ResponseDefinitions)(nil), "openapi.v2.ResponseDefinitions") - proto.RegisterType((*ResponseValue)(nil), "openapi.v2.ResponseValue") - proto.RegisterType((*Responses)(nil), "openapi.v2.Responses") - proto.RegisterType((*Schema)(nil), "openapi.v2.Schema") - proto.RegisterType((*SchemaItem)(nil), "openapi.v2.SchemaItem") - proto.RegisterType((*SecurityDefinitions)(nil), "openapi.v2.SecurityDefinitions") - proto.RegisterType((*SecurityDefinitionsItem)(nil), "openapi.v2.SecurityDefinitionsItem") - proto.RegisterType((*SecurityRequirement)(nil), "openapi.v2.SecurityRequirement") - proto.RegisterType((*StringArray)(nil), "openapi.v2.StringArray") - proto.RegisterType((*Tag)(nil), "openapi.v2.Tag") - proto.RegisterType((*TypeItem)(nil), "openapi.v2.TypeItem") - proto.RegisterType((*VendorExtension)(nil), "openapi.v2.VendorExtension") - proto.RegisterType((*Xml)(nil), "openapi.v2.Xml") -} - -func init() { proto.RegisterFile("OpenAPIv2/OpenAPIv2.proto", fileDescriptor_336adc04ae589d92) } - -var fileDescriptor_336adc04ae589d92 = []byte{ - // 3129 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x3b, 0x4b, 0x73, 0x1c, 0x57, - 0xd5, 0xf3, 0x7e, 0x1c, 0x69, 0x46, 0xa3, 0x96, 0x2c, 0xb7, 0x24, 0xc7, 0x71, 0xe4, 0x3c, 0x6c, - 0xe7, 0xb3, 0x9c, 0x4f, 0x29, 0x48, 0x05, 0x2a, 0x05, 0xf2, 0xab, 0xc6, 0xc4, 0x44, 0x4a, 0xcb, - 0x0e, 0x09, 0x04, 0xba, 0xae, 0x66, 0xee, 0x48, 0x9d, 0x74, 0xf7, 0x6d, 0x77, 0xf7, 0xc8, 0x1a, - 0x16, 0x2c, 0xa0, 0x8a, 0x35, 0x50, 0x59, 0x53, 0x15, 0x16, 0x14, 0x55, 0x59, 0xb0, 0x62, 0xc5, - 0x1f, 0x60, 0xc7, 0x3f, 0x60, 0x0d, 0x5b, 0xaa, 0x58, 0x51, 0x3c, 0xea, 0xbe, 0xfa, 0x31, 0x7d, - 0x7b, 0x1e, 0x96, 0x0b, 0x28, 0xd0, 0x6a, 0xe6, 0xde, 0x73, 0xee, 0xb9, 0xa7, 0x4f, 0x9f, 0xd7, - 0x3d, 0xe7, 0x36, 0xac, 0xef, 0x79, 0xd8, 0xdd, 0xdd, 0x7f, 0x70, 0xb2, 0x73, 0x2b, 0xfa, 0xb7, - 0xed, 0xf9, 0x24, 0x24, 0x1a, 0x10, 0x0f, 0xbb, 0xc8, 0xb3, 0xb6, 0x4f, 0x76, 0x36, 0xd6, 0x8f, - 0x08, 0x39, 0xb2, 0xf1, 0x2d, 0x06, 0x39, 0x1c, 0x0e, 0x6e, 0x21, 0x77, 0xc4, 0xd1, 0xb6, 0x1c, - 0xd0, 0x77, 0xfb, 0x7d, 0x2b, 0xb4, 0x88, 0x8b, 0xec, 0x7d, 0x9f, 0x78, 0xd8, 0x0f, 0x2d, 0x1c, - 0x3c, 0x08, 0xb1, 0xa3, 0xfd, 0x1f, 0xd4, 0x82, 0xde, 0x31, 0x76, 0x90, 0x5e, 0xbc, 0x52, 0xbc, - 0xb6, 0xb0, 0xa3, 0x6d, 0xc7, 0x34, 0xb7, 0x0f, 0x18, 0xa4, 0x5b, 0x30, 0x04, 0x8e, 0xb6, 0x01, - 0xf5, 0x43, 0x42, 0x6c, 0x8c, 0x5c, 0xbd, 0x74, 0xa5, 0x78, 0xad, 0xd1, 0x2d, 0x18, 0x72, 0xe2, - 0x76, 0x1d, 0xaa, 0xc4, 0xc5, 0x64, 0xb0, 0x75, 0x0f, 0xca, 0xbb, 0xee, 0x48, 0xbb, 0x01, 0xd5, - 0x13, 0x64, 0x0f, 
0xb1, 0x20, 0xbc, 0xba, 0xcd, 0x19, 0xdc, 0x96, 0x0c, 0x6e, 0xef, 0xba, 0x23, - 0x83, 0xa3, 0x68, 0x1a, 0x54, 0x46, 0xc8, 0xb1, 0x19, 0xd1, 0xa6, 0xc1, 0xfe, 0x6f, 0x7d, 0x51, - 0x84, 0xf6, 0xae, 0x67, 0xbd, 0x8b, 0x47, 0x07, 0xb8, 0x37, 0xf4, 0xad, 0x70, 0x44, 0xd1, 0xc2, - 0x91, 0xc7, 0x29, 0x36, 0x0d, 0xf6, 0x9f, 0xce, 0xb9, 0xc8, 0xc1, 0x72, 0x29, 0xfd, 0xaf, 0xb5, - 0xa1, 0x64, 0xb9, 0x7a, 0x99, 0xcd, 0x94, 0x2c, 0x57, 0xbb, 0x02, 0x0b, 0x7d, 0x1c, 0xf4, 0x7c, - 0xcb, 0xa3, 0x32, 0xd0, 0x2b, 0x0c, 0x90, 0x9c, 0xd2, 0xbe, 0x06, 0x9d, 0x13, 0xec, 0xf6, 0x89, - 0x6f, 0xe2, 0xd3, 0x10, 0xbb, 0x01, 0x45, 0xab, 0x5e, 0x29, 0x33, 0xbe, 0x13, 0x02, 0x79, 0x0f, - 0x39, 0xb8, 0x4f, 0xf9, 0x5e, 0xe2, 0xd8, 0xf7, 0x24, 0xf2, 0xd6, 0x67, 0x45, 0xd8, 0xbc, 0x8d, - 0x02, 0xab, 0xb7, 0x3b, 0x0c, 0x8f, 0xb1, 0x1b, 0x5a, 0x3d, 0x44, 0x09, 0x4f, 0x64, 0x7d, 0x8c, - 0xad, 0xd2, 0x6c, 0x6c, 0x95, 0xe7, 0x61, 0xeb, 0x0f, 0x45, 0x68, 0xdd, 0x26, 0xfd, 0xd1, 0x3e, - 0xf2, 0x91, 0x83, 0x43, 0xec, 0x8f, 0x6f, 0x5a, 0xcc, 0x6e, 0x3a, 0x8b, 0x44, 0x37, 0xa0, 0xe1, - 0xe3, 0x27, 0x43, 0xcb, 0xc7, 0x7d, 0x26, 0xce, 0x86, 0x11, 0x8d, 0xb5, 0x1b, 0x91, 0x4a, 0x55, - 0xf3, 0x54, 0x2a, 0x52, 0x28, 0xd5, 0x03, 0xd6, 0xe6, 0x79, 0xc0, 0x1f, 0x17, 0xa1, 0x7e, 0x87, - 0xb8, 0x21, 0xea, 0x85, 0x11, 0xe3, 0xc5, 0x04, 0xe3, 0x1d, 0x28, 0x0f, 0x7d, 0xa9, 0x58, 0xf4, - 0xaf, 0xb6, 0x0a, 0x55, 0xec, 0x20, 0xcb, 0x16, 0x4f, 0xc3, 0x07, 0x4a, 0x46, 0x2a, 0xf3, 0x30, - 0xf2, 0x08, 0xea, 0x77, 0xf1, 0x00, 0x0d, 0xed, 0x50, 0x7b, 0x00, 0x17, 0x50, 0x64, 0x6f, 0xa6, - 0x17, 0x19, 0x9c, 0x5e, 0x9c, 0x40, 0x70, 0x15, 0x29, 0x4c, 0x74, 0xeb, 0x3b, 0xb0, 0x70, 0x17, - 0x0f, 0x2c, 0x97, 0x41, 0x02, 0xed, 0xe1, 0x64, 0xca, 0x17, 0x33, 0x94, 0x85, 0xb8, 0xd5, 0xc4, - 0xff, 0x58, 0x85, 0xc6, 0x5d, 0xd2, 0x1b, 0x3a, 0xd8, 0x0d, 0x35, 0x1d, 0xea, 0xc1, 0x53, 0x74, - 0x74, 0x84, 0x7d, 0x21, 0x3f, 0x39, 0xd4, 0x5e, 0x86, 0x8a, 0xe5, 0x0e, 0x08, 0x93, 0xe1, 0xc2, - 0x4e, 0x27, 0xb9, 0xc7, 0x03, 0x77, 0x40, 0x0c, 0x06, 0xa5, 0xc2, 0x3f, 0x26, 0x41, 0x28, 0xa4, - 0xca, 0xfe, 0x6b, 0x9b, 0xd0, 0x3c, 0x44, 0x01, 0x36, 0x3d, 0x14, 0x1e, 0x0b, 0xab, 0x6b, 0xd0, - 0x89, 0x7d, 0x14, 0x1e, 0xb3, 0x0d, 0x29, 0x77, 0x38, 0x60, 0x96, 0x46, 0x37, 0xe4, 0x43, 0xaa, - 0x5c, 0x3d, 0xe2, 0x06, 0x43, 0x0a, 0xaa, 0x31, 0x50, 0x34, 0xa6, 0x30, 0xcf, 0x27, 0xfd, 0x61, - 0x0f, 0x07, 0x7a, 0x9d, 0xc3, 0xe4, 0x58, 0x7b, 0x0d, 0xaa, 0x74, 0xa7, 0x40, 0x6f, 0x30, 0x4e, - 0x97, 0x93, 0x9c, 0xd2, 0x2d, 0x03, 0x83, 0xc3, 0xb5, 0xb7, 0xa9, 0x0d, 0x44, 0x52, 0xd5, 0x9b, - 0x0c, 0x3d, 0x25, 0xbc, 0x84, 0xd0, 0x8d, 0x24, 0xae, 0xf6, 0x75, 0x00, 0x4f, 0xda, 0x52, 0xa0, - 0x03, 0x5b, 0x79, 0x25, 0xbd, 0x91, 0x80, 0x26, 0x49, 0x24, 0xd6, 0x68, 0xef, 0x40, 0xd3, 0xc7, - 0x81, 0x47, 0xdc, 0x00, 0x07, 0xfa, 0x02, 0x23, 0xf0, 0x62, 0x92, 0x80, 0x21, 0x80, 0xc9, 0xf5, - 0xf1, 0x0a, 0xed, 0xab, 0xd0, 0x08, 0x84, 0x53, 0xd1, 0x17, 0xd9, 0x5b, 0x4f, 0xad, 0x96, 0x0e, - 0xc7, 0xe0, 0xd6, 0x48, 0x5f, 0xad, 0x11, 0x2d, 0xd0, 0x0c, 0x58, 0x95, 0xff, 0xcd, 0xa4, 0x04, - 0x5a, 0x59, 0x36, 0x24, 0xa1, 0x24, 0x1b, 0x2b, 0x41, 0x76, 0x52, 0xbb, 0x0a, 0x95, 0x10, 0x1d, - 0x05, 0x7a, 0x9b, 0x31, 0xb3, 0x94, 0xa4, 0xf1, 0x08, 0x1d, 0x19, 0x0c, 0xa8, 0xbd, 0x03, 0x2d, - 0x6a, 0x57, 0x3e, 0x55, 0xdb, 0x3e, 0xe9, 0x05, 0xfa, 0x12, 0xdb, 0x51, 0x4f, 0x62, 0xdf, 0x13, - 0x08, 0x77, 0x49, 0x2f, 0x30, 0x16, 0x71, 0x62, 0xa4, 0xb4, 0xce, 0xce, 0x3c, 0xd6, 0xf9, 0x18, - 0x1a, 0xf7, 0x4e, 0x91, 0xe3, 0xd9, 0x38, 0x78, 0x9e, 0xe6, 0xf9, 0xa3, 0x22, 0x2c, 0x26, 0xd9, - 0x9e, 0xc1, 0xbb, 0x66, 0x1d, 0xd2, 0x99, 
0x9d, 0xfc, 0x3f, 0x4a, 0x00, 0xf7, 0x2d, 0x1b, 0x73, - 0x63, 0xd7, 0xd6, 0xa0, 0x36, 0x20, 0xbe, 0x83, 0x42, 0xb1, 0xbd, 0x18, 0x51, 0xc7, 0x17, 0x5a, - 0xa1, 0x2d, 0x1d, 0x3b, 0x1f, 0x8c, 0x73, 0x5c, 0xce, 0x72, 0x7c, 0x1d, 0xea, 0x7d, 0xee, 0xd9, - 0x98, 0x0d, 0x8f, 0xbd, 0x63, 0xca, 0x91, 0x84, 0xa7, 0xc2, 0x02, 0x37, 0xea, 0x38, 0x2c, 0xc8, - 0x08, 0x58, 0x4b, 0x44, 0xc0, 0x4d, 0x6a, 0x0b, 0xa8, 0x6f, 0x12, 0xd7, 0x1e, 0xe9, 0x75, 0x19, - 0x47, 0x50, 0x7f, 0xcf, 0xb5, 0x47, 0x59, 0x9d, 0x69, 0xcc, 0xa5, 0x33, 0xd7, 0xa1, 0x8e, 0xf9, - 0x2b, 0x17, 0x06, 0x9e, 0x65, 0x5b, 0xc0, 0x95, 0x6f, 0x00, 0xe6, 0x79, 0x03, 0x5f, 0xd4, 0x60, - 0xe3, 0x3e, 0xf1, 0x9d, 0xbb, 0x28, 0x44, 0x91, 0x03, 0x38, 0x18, 0x1e, 0x1e, 0xc8, 0xb4, 0x29, - 0x16, 0x4b, 0x71, 0x2c, 0x5a, 0xf2, 0xc8, 0x5a, 0xca, 0xcb, 0x55, 0xca, 0xf9, 0xf1, 0xb9, 0x92, - 0x08, 0x73, 0x37, 0x60, 0x19, 0xd9, 0x36, 0x79, 0x6a, 0x62, 0xc7, 0x0b, 0x47, 0x26, 0x4f, 0xbc, - 0xaa, 0x6c, 0xab, 0x25, 0x06, 0xb8, 0x47, 0xe7, 0x3f, 0x90, 0xc9, 0x56, 0xe6, 0x45, 0xc4, 0x3a, - 0x53, 0x4f, 0xe9, 0xcc, 0xff, 0x43, 0xd5, 0x0a, 0xb1, 0x23, 0x65, 0xbf, 0x99, 0xf2, 0x74, 0xbe, - 0xe5, 0x58, 0xa1, 0x75, 0xc2, 0x33, 0xc9, 0xc0, 0xe0, 0x98, 0xda, 0xeb, 0xb0, 0xdc, 0x23, 0xb6, - 0x8d, 0x7b, 0x94, 0x59, 0x53, 0x50, 0x6d, 0x32, 0xaa, 0x9d, 0x18, 0x70, 0x9f, 0xd3, 0x4f, 0xe8, - 0x16, 0x4c, 0xd1, 0x2d, 0x1d, 0xea, 0x0e, 0x3a, 0xb5, 0x9c, 0xa1, 0xc3, 0xbc, 0x66, 0xd1, 0x90, - 0x43, 0xba, 0x23, 0x3e, 0xed, 0xd9, 0xc3, 0xc0, 0x3a, 0xc1, 0xa6, 0xc4, 0x59, 0x64, 0x0f, 0xdf, - 0x89, 0x00, 0xdf, 0x14, 0xc8, 0x94, 0x8c, 0xe5, 0x32, 0x94, 0x96, 0x20, 0xc3, 0x87, 0x63, 0x64, - 0x04, 0x4e, 0x7b, 0x9c, 0x8c, 0x40, 0x7e, 0x01, 0xc0, 0x41, 0xa7, 0xa6, 0x8d, 0xdd, 0xa3, 0xf0, - 0x98, 0x79, 0xb3, 0xb2, 0xd1, 0x74, 0xd0, 0xe9, 0x43, 0x36, 0xc1, 0xc0, 0x96, 0x2b, 0xc1, 0x1d, - 0x01, 0xb6, 0x5c, 0x01, 0xd6, 0xa1, 0xee, 0xa1, 0x90, 0x2a, 0xab, 0xbe, 0xcc, 0x83, 0xad, 0x18, - 0x52, 0x8b, 0xa0, 0x74, 0xb9, 0xd0, 0x35, 0xb6, 0xae, 0xe1, 0xa0, 0x53, 0x26, 0x61, 0x06, 0xb4, - 0x5c, 0x01, 0x5c, 0x11, 0x40, 0xcb, 0xe5, 0xc0, 0x97, 0x60, 0x71, 0xe8, 0x5a, 0x4f, 0x86, 0x58, - 0xc0, 0x57, 0x19, 0xe7, 0x0b, 0x7c, 0x8e, 0xa3, 0x5c, 0x85, 0x0a, 0x76, 0x87, 0x8e, 0x7e, 0x21, - 0xeb, 0xaa, 0xa9, 0xa8, 0x19, 0x50, 0x7b, 0x11, 0x16, 0x9c, 0xa1, 0x1d, 0x5a, 0x9e, 0x8d, 0x4d, - 0x32, 0xd0, 0xd7, 0x98, 0x90, 0x40, 0x4e, 0xed, 0x0d, 0x94, 0xd6, 0x72, 0x71, 0x2e, 0x6b, 0xa9, - 0x42, 0xad, 0x8b, 0x51, 0x1f, 0xfb, 0xca, 0xb4, 0x38, 0xd6, 0xc5, 0x92, 0x5a, 0x17, 0xcb, 0x67, - 0xd3, 0xc5, 0xca, 0x74, 0x5d, 0xac, 0xce, 0xae, 0x8b, 0xb5, 0x19, 0x74, 0xb1, 0x3e, 0x5d, 0x17, - 0x1b, 0x33, 0xe8, 0x62, 0x73, 0x26, 0x5d, 0x84, 0xc9, 0xba, 0xb8, 0x30, 0x41, 0x17, 0x17, 0x27, - 0xe8, 0x62, 0x6b, 0x92, 0x2e, 0xb6, 0xa7, 0xe8, 0xe2, 0x52, 0xbe, 0x2e, 0x76, 0xe6, 0xd0, 0xc5, - 0xe5, 0x8c, 0x2e, 0x8e, 0x79, 0x4b, 0x6d, 0xb6, 0x23, 0xd4, 0xca, 0x3c, 0xda, 0xfa, 0xb7, 0x2a, - 0xe8, 0x5c, 0x5b, 0xff, 0x2d, 0x9e, 0x5d, 0x5a, 0x48, 0x55, 0x69, 0x21, 0x35, 0xb5, 0x85, 0xd4, - 0xcf, 0x66, 0x21, 0x8d, 0xe9, 0x16, 0xd2, 0x9c, 0xdd, 0x42, 0x60, 0x06, 0x0b, 0x59, 0x98, 0x6e, - 0x21, 0x8b, 0x33, 0x58, 0x48, 0x6b, 0x26, 0x0b, 0x69, 0x4f, 0xb6, 0x90, 0xa5, 0x09, 0x16, 0xd2, - 0x99, 0x60, 0x21, 0xcb, 0x93, 0x2c, 0x44, 0x9b, 0x62, 0x21, 0x2b, 0xf9, 0x16, 0xb2, 0x3a, 0x87, - 0x85, 0x5c, 0x98, 0xc9, 0x5b, 0xaf, 0xcd, 0xa3, 0xff, 0xdf, 0x82, 0x3a, 0x57, 0xff, 0x67, 0x38, - 0x7e, 0xf2, 0x85, 0x39, 0xc9, 0xf3, 0xe7, 0x25, 0xa8, 0xd0, 0x03, 0x64, 0x9c, 0x98, 0x16, 0x93, - 0x89, 0xa9, 0x0e, 0xf5, 0x13, 0xec, 0x07, 0x71, 0x65, 0x44, 0x0e, 
0x67, 0x30, 0xa4, 0x6b, 0xd0, - 0x09, 0xb1, 0xef, 0x04, 0x26, 0x19, 0x98, 0x01, 0xf6, 0x4f, 0xac, 0x9e, 0x34, 0xaa, 0x36, 0x9b, - 0xdf, 0x1b, 0x1c, 0xf0, 0x59, 0xed, 0x26, 0xd4, 0x7b, 0xbc, 0x7c, 0x20, 0x9c, 0xfe, 0x4a, 0xf2, - 0x21, 0x44, 0x65, 0xc1, 0x90, 0x38, 0x14, 0xdd, 0xb6, 0x7a, 0xd8, 0x0d, 0x78, 0xfa, 0x34, 0x86, - 0xfe, 0x90, 0x83, 0x0c, 0x89, 0xa3, 0x14, 0x7e, 0x7d, 0x1e, 0xe1, 0xbf, 0x05, 0x4d, 0xa6, 0x0c, - 0xac, 0x56, 0x77, 0x23, 0x51, 0xab, 0x2b, 0x4f, 0x2e, 0xac, 0x6c, 0xdd, 0x85, 0xd6, 0x37, 0x02, - 0xe2, 0x1a, 0x78, 0x80, 0x7d, 0xec, 0xf6, 0xb0, 0xb6, 0x0c, 0x15, 0xd3, 0xc7, 0x03, 0x21, 0xe3, - 0xb2, 0x81, 0x07, 0xd3, 0xeb, 0x4f, 0x5b, 0x1e, 0xd4, 0xc5, 0x33, 0xcd, 0x58, 0x5c, 0x39, 0xf3, - 0x59, 0xe6, 0x1e, 0x34, 0x24, 0x50, 0xb9, 0xe5, 0x2b, 0xb2, 0xaa, 0x58, 0x52, 0x3b, 0x20, 0x0e, - 0xdd, 0x7a, 0x17, 0x16, 0x12, 0x0a, 0xa8, 0xa4, 0x74, 0x2d, 0x4d, 0x29, 0x25, 0x4c, 0xa1, 0xb7, - 0x82, 0xd8, 0xfb, 0xd0, 0x66, 0xc4, 0xe2, 0x22, 0x9a, 0x8a, 0xde, 0xeb, 0x69, 0x7a, 0x17, 0x94, - 0x45, 0x01, 0x49, 0x72, 0x0f, 0x5a, 0x82, 0x64, 0x78, 0xcc, 0xde, 0xad, 0x8a, 0xe2, 0x8d, 0x34, - 0xc5, 0xd5, 0xf1, 0x7a, 0x06, 0x5d, 0x38, 0x4e, 0x50, 0x56, 0x0f, 0xe6, 0x26, 0x28, 0x17, 0x4a, - 0x82, 0x1f, 0x81, 0x96, 0x22, 0x18, 0x9d, 0x1d, 0x32, 0x54, 0x6f, 0xa5, 0xa9, 0xae, 0xab, 0xa8, - 0xb2, 0xd5, 0xe3, 0x2f, 0x47, 0xc4, 0xd0, 0x79, 0x5f, 0x8e, 0xd0, 0x74, 0x41, 0xcc, 0x81, 0x4b, - 0x9c, 0x58, 0xb6, 0x34, 0x91, 0x2b, 0xd8, 0xb7, 0xd3, 0xd4, 0xaf, 0x4e, 0xa9, 0x7b, 0x24, 0xe5, - 0xfc, 0x96, 0xe4, 0x3d, 0xf4, 0x2d, 0xf7, 0x48, 0x49, 0x7d, 0x35, 0x49, 0xbd, 0x29, 0x17, 0x3e, - 0x86, 0x4e, 0x62, 0xe1, 0xae, 0xef, 0x23, 0xb5, 0x82, 0xdf, 0x4c, 0xf3, 0x96, 0xf2, 0xa9, 0x89, - 0xb5, 0x92, 0xec, 0x6f, 0xca, 0xd0, 0x79, 0x8f, 0xb8, 0xe9, 0x1a, 0x2f, 0x86, 0xcd, 0x63, 0xa6, - 0xc1, 0x66, 0x54, 0x77, 0x32, 0x83, 0xe1, 0xa1, 0x99, 0xaa, 0xf4, 0xbf, 0x9c, 0x55, 0xf8, 0x6c, - 0x82, 0xd3, 0x2d, 0x18, 0xfa, 0x71, 0x5e, 0xf2, 0x63, 0xc3, 0x65, 0x9a, 0x30, 0x98, 0x7d, 0x14, - 0x22, 0xf5, 0x4e, 0xfc, 0x19, 0x5e, 0x4d, 0xee, 0x94, 0x7f, 0x4c, 0xee, 0x16, 0x8c, 0x8d, 0x41, - 0xfe, 0x21, 0xfa, 0x10, 0x36, 0x9e, 0x0c, 0xb1, 0x3f, 0x52, 0xef, 0x54, 0xce, 0xbe, 0xc9, 0xf7, - 0x29, 0xb6, 0x72, 0x9b, 0x8b, 0x4f, 0xd4, 0x20, 0xcd, 0x84, 0x75, 0x0f, 0x85, 0xc7, 0xea, 0x2d, - 0x78, 0xf1, 0x63, 0x6b, 0xdc, 0x0a, 0x95, 0x3b, 0xac, 0x79, 0x4a, 0x48, 0xdc, 0x24, 0xf9, 0xbc, - 0x04, 0xfa, 0x1e, 0x1a, 0x86, 0xc7, 0x3b, 0xbb, 0xbd, 0x1e, 0x0e, 0x82, 0x3b, 0xa4, 0x8f, 0xa7, - 0xf5, 0x39, 0x06, 0x36, 0x79, 0x2a, 0xab, 0xf2, 0xf4, 0xbf, 0xf6, 0x06, 0x0d, 0x08, 0xc4, 0xc3, - 0xf2, 0x48, 0x94, 0x2a, 0x8d, 0x70, 0xea, 0x07, 0x0c, 0x6e, 0x08, 0x3c, 0x9a, 0x35, 0xd1, 0x69, - 0xe2, 0x5b, 0xdf, 0x67, 0xfd, 0x09, 0x93, 0xfa, 0x6f, 0x71, 0x20, 0x4a, 0x01, 0x1e, 0xfb, 0x36, - 0x4d, 0x60, 0x42, 0xf2, 0x29, 0xe6, 0x48, 0x3c, 0xff, 0x6c, 0xb0, 0x09, 0x0a, 0x1c, 0x0b, 0x1e, - 0xb5, 0xd9, 0x32, 0xef, 0xb9, 0x82, 0xdf, 0x5f, 0x8a, 0xb0, 0x2e, 0x64, 0xe4, 0x79, 0xf6, 0x2c, - 0x1d, 0x95, 0xe7, 0x23, 0xa4, 0xd4, 0x73, 0x57, 0x26, 0x3f, 0x77, 0x75, 0xb6, 0xe7, 0x9e, 0xab, - 0xa7, 0xf1, 0xc3, 0x12, 0xac, 0x71, 0xc6, 0x1e, 0x38, 0xf4, 0xb9, 0xad, 0xf0, 0x3f, 0x4d, 0x33, - 0xfe, 0x05, 0x42, 0xf8, 0x73, 0x51, 0x0a, 0x61, 0x1f, 0x05, 0xc1, 0x53, 0xe2, 0xf7, 0xff, 0x07, - 0xde, 0xfc, 0xc7, 0xb0, 0x98, 0xe4, 0xeb, 0x19, 0xfa, 0x3d, 0x2c, 0x42, 0xe4, 0x24, 0xdc, 0x3f, - 0xaf, 0x40, 0x73, 0xcf, 0xc3, 0x3e, 0x92, 0x87, 0x4d, 0x56, 0xb7, 0x2f, 0xb2, 0x3a, 0x2d, 0x2f, - 0xd3, 0xeb, 0x50, 0x0f, 0x86, 0x8e, 0x83, 0xfc, 0x91, 0xcc, 0xb9, 0xc5, 0x70, 0x86, 0x9c, 
0x3b, - 0x53, 0xae, 0xad, 0xcc, 0x55, 0xae, 0x7d, 0x09, 0x16, 0x89, 0xe4, 0xcd, 0xb4, 0xfa, 0x52, 0xbc, - 0xd1, 0xdc, 0x83, 0x7e, 0xaa, 0xf7, 0x53, 0x1b, 0xeb, 0xfd, 0x24, 0x7b, 0x46, 0xf5, 0xb1, 0x9e, - 0xd1, 0x57, 0x52, 0x3d, 0x9b, 0x06, 0x13, 0xdd, 0x86, 0x32, 0x3d, 0xe3, 0xa1, 0x3e, 0xd9, 0xad, - 0x79, 0x33, 0xd9, 0xad, 0x69, 0x66, 0x33, 0x3b, 0x99, 0xe0, 0xa4, 0x7a, 0x34, 0x89, 0xd6, 0x16, - 0xa4, 0x5b, 0x5b, 0x97, 0x01, 0xfa, 0xd8, 0xf3, 0x71, 0x0f, 0x85, 0xb8, 0x2f, 0x4e, 0xbd, 0x89, - 0x99, 0xb3, 0x75, 0x77, 0x54, 0xea, 0xd7, 0x9a, 0x47, 0xfd, 0x7e, 0x59, 0x84, 0x66, 0x9c, 0x45, - 0xdc, 0x86, 0xf6, 0x21, 0xe9, 0x27, 0xe2, 0xad, 0x48, 0x1c, 0x52, 0x09, 0x5e, 0x2a, 0xf1, 0xe8, - 0x16, 0x8c, 0xd6, 0x61, 0x2a, 0x13, 0x79, 0x08, 0x9a, 0x4b, 0x5c, 0x73, 0x8c, 0x0e, 0x4f, 0x0b, - 0x2e, 0xa5, 0x98, 0x1a, 0xcb, 0x61, 0xba, 0x05, 0xa3, 0xe3, 0x8e, 0xcd, 0xc5, 0xd1, 0xf3, 0x08, - 0x56, 0x55, 0x7d, 0x36, 0x6d, 0x6f, 0xb2, 0xbd, 0x6c, 0x64, 0xc4, 0x10, 0x27, 0xe6, 0x6a, 0x93, - 0xf9, 0xac, 0x08, 0xed, 0xb4, 0x76, 0x68, 0x5f, 0x82, 0xe6, 0xb8, 0x44, 0xd4, 0xb9, 0x7e, 0xb7, - 0x60, 0xc4, 0x98, 0x54, 0x9a, 0x9f, 0x04, 0xc4, 0xa5, 0x67, 0x30, 0x7e, 0x22, 0x53, 0xa5, 0xcb, - 0xa9, 0x23, 0x1b, 0x95, 0xe6, 0x27, 0xc9, 0x89, 0xf8, 0xf9, 0x7f, 0x5f, 0x86, 0x46, 0x74, 0x74, - 0x50, 0x9c, 0xec, 0x5e, 0x83, 0xf2, 0x11, 0x0e, 0x55, 0x27, 0x91, 0xc8, 0xfe, 0x0d, 0x8a, 0x41, - 0x11, 0xbd, 0x61, 0x28, 0xfc, 0x63, 0x1e, 0xa2, 0x37, 0x0c, 0xb5, 0xeb, 0x50, 0xf1, 0x48, 0x20, - 0x3b, 0x40, 0x39, 0x98, 0x0c, 0x45, 0xbb, 0x09, 0xb5, 0x3e, 0xb6, 0x71, 0x88, 0xc5, 0x89, 0x3a, - 0x07, 0x59, 0x20, 0x69, 0xb7, 0xa0, 0x4e, 0x3c, 0xde, 0x86, 0xac, 0x4d, 0xc2, 0x97, 0x58, 0x94, - 0x15, 0x9a, 0x92, 0x8a, 0x22, 0x57, 0x1e, 0x2b, 0x14, 0x85, 0x9e, 0xc9, 0x3c, 0x14, 0xf6, 0x8e, - 0x45, 0xfb, 0x22, 0x07, 0x97, 0xe3, 0x8c, 0xb9, 0x89, 0xe6, 0x5c, 0x6e, 0xe2, 0xcc, 0x1d, 0xa4, - 0xbf, 0x56, 0x61, 0x4d, 0x9d, 0x4d, 0x9e, 0xd7, 0x18, 0xcf, 0x6b, 0x8c, 0xff, 0xed, 0x35, 0xc6, - 0xa7, 0x50, 0x65, 0x17, 0x34, 0x94, 0x94, 0x8a, 0x73, 0x50, 0xd2, 0x6e, 0x42, 0x85, 0xdd, 0x36, - 0x29, 0xb1, 0x45, 0xeb, 0x0a, 0x87, 0x2f, 0xea, 0x26, 0x0c, 0x6d, 0xeb, 0x67, 0x55, 0x58, 0x1a, - 0xd3, 0xda, 0xf3, 0x9e, 0xd4, 0x79, 0x4f, 0xea, 0x4c, 0x3d, 0x29, 0x95, 0x0e, 0x6b, 0xf3, 0x58, - 0xc3, 0xb7, 0x01, 0xe2, 0x14, 0xe4, 0x39, 0xdf, 0xf9, 0xfa, 0x55, 0x0d, 0x2e, 0xe6, 0x14, 0x46, - 0xce, 0xaf, 0x29, 0x9c, 0x5f, 0x53, 0x38, 0xbf, 0xa6, 0x10, 0x9b, 0xe1, 0xdf, 0x8b, 0xd0, 0x88, - 0xca, 0xe9, 0xd3, 0x2f, 0x76, 0x6d, 0x47, 0xdd, 0x19, 0x9e, 0x76, 0xaf, 0x65, 0x6b, 0xd6, 0x2c, - 0xf0, 0xc8, 0xab, 0xaf, 0x37, 0xa1, 0xce, 0x2b, 0xab, 0x32, 0x78, 0xac, 0x64, 0x0b, 0xb2, 0x81, - 0x21, 0x71, 0xb4, 0x37, 0xa0, 0x21, 0xae, 0x2b, 0xc9, 0x93, 0xf5, 0x6a, 0xfa, 0x64, 0xcd, 0x61, - 0x46, 0x84, 0x75, 0xf6, 0x3b, 0xcd, 0x18, 0x56, 0x14, 0x97, 0x11, 0xb5, 0xf7, 0x26, 0x3b, 0xa4, - 0x6c, 0xcc, 0x8d, 0x5a, 0x0b, 0x6a, 0x97, 0xf4, 0x93, 0x22, 0xb4, 0xd2, 0x5d, 0x86, 0x1d, 0xea, - 0x88, 0xf8, 0x44, 0x74, 0x7b, 0x5c, 0x71, 0xe6, 0xee, 0x16, 0x8c, 0x08, 0xef, 0xf9, 0x9e, 0xaf, - 0x7e, 0x5a, 0x84, 0x66, 0x74, 0xb2, 0xd7, 0xee, 0x40, 0x4b, 0x6e, 0x63, 0xf6, 0x48, 0x1f, 0x8b, - 0x07, 0xbd, 0x9c, 0xfb, 0xa0, 0xbc, 0xdb, 0xb1, 0x28, 0x17, 0xdd, 0x21, 0x7d, 0x75, 0x2b, 0xb0, - 0x34, 0xcf, 0xdb, 0xf8, 0x75, 0x13, 0x6a, 0xc2, 0x51, 0x2b, 0x4e, 0x7c, 0x79, 0x09, 0x4a, 0xd4, - 0x5b, 0x2d, 0x4f, 0xb8, 0xf4, 0x57, 0x99, 0x78, 0xe9, 0x6f, 0x5a, 0xe2, 0x31, 0x66, 0x89, 0xb5, - 0x8c, 0x25, 0x26, 0x5c, 0x62, 0x7d, 0x06, 0x97, 0xd8, 0x98, 0xee, 0x12, 0x9b, 0x33, 0xb8, 0x44, - 0x98, 0xc9, 0x25, 
0x2e, 0x4c, 0x76, 0x89, 0x8b, 0x13, 0x5c, 0x62, 0x6b, 0x82, 0x4b, 0x6c, 0x4f, - 0x72, 0x89, 0x4b, 0x53, 0x5c, 0x62, 0x27, 0xeb, 0x12, 0x5f, 0x81, 0x36, 0x25, 0x9e, 0x30, 0x36, - 0x7e, 0x12, 0x68, 0x39, 0xe8, 0x34, 0x91, 0x2b, 0x50, 0x34, 0xcb, 0x4d, 0xa2, 0x69, 0x02, 0xcd, - 0x72, 0x13, 0x68, 0xc9, 0x40, 0xbf, 0x32, 0x76, 0x4d, 0x73, 0xa6, 0x13, 0xc1, 0x47, 0x79, 0x2e, - 0xe0, 0x42, 0xb6, 0xb5, 0x94, 0xf7, 0xe9, 0x89, 0xda, 0x1b, 0x68, 0xd7, 0x44, 0xd8, 0x5f, 0xcb, - 0xda, 0xfd, 0xa3, 0x91, 0x87, 0x79, 0xee, 0xce, 0x92, 0x81, 0xd7, 0x65, 0xd0, 0xbf, 0x98, 0x3d, - 0xdc, 0x47, 0x4d, 0x73, 0x19, 0xee, 0xaf, 0x43, 0x0d, 0xd9, 0x36, 0xd5, 0x4f, 0x3d, 0xb7, 0x77, - 0x5e, 0x45, 0xb6, 0xbd, 0x37, 0xd0, 0xbe, 0x0c, 0x90, 0x78, 0xa2, 0xf5, 0xac, 0x33, 0x8f, 0xb9, - 0x35, 0x12, 0x98, 0xda, 0xcb, 0xd0, 0xea, 0x5b, 0xd4, 0x82, 0x1c, 0xcb, 0x45, 0x21, 0xf1, 0xf5, - 0x0d, 0xa6, 0x20, 0xe9, 0xc9, 0xf4, 0x95, 0xd7, 0xcd, 0xb1, 0x2b, 0xaf, 0x2f, 0x41, 0xf9, 0xd4, - 0xb1, 0xf5, 0x4b, 0x59, 0x8b, 0xfb, 0xd0, 0xb1, 0x0d, 0x0a, 0xcb, 0x96, 0x59, 0x5f, 0x78, 0xd6, - 0x5b, 0xb1, 0x97, 0x9f, 0xe1, 0x56, 0xec, 0x8b, 0xf3, 0x78, 0xac, 0x1f, 0x00, 0xc4, 0x71, 0x6f, - 0xce, 0x2f, 0x8d, 0xde, 0x86, 0x85, 0x81, 0x65, 0x63, 0x33, 0x3f, 0xa4, 0xc6, 0x37, 0x9e, 0xbb, - 0x05, 0x03, 0x06, 0xd1, 0x28, 0xf6, 0xe2, 0x21, 0xac, 0x28, 0xba, 0xb9, 0xda, 0x77, 0x27, 0xc7, - 0xaf, 0x6b, 0xd9, 0x84, 0x3a, 0xa7, 0x25, 0xac, 0x0e, 0x67, 0x7f, 0xaa, 0xc0, 0xc5, 0xbc, 0x66, - 0xb4, 0x03, 0x2f, 0x1c, 0xa2, 0xc0, 0xea, 0x99, 0x28, 0xf5, 0x95, 0x90, 0x19, 0xd5, 0x7c, 0xb9, - 0x68, 0x5e, 0x4b, 0x55, 0x58, 0xf3, 0xbf, 0x2a, 0xea, 0x16, 0x8c, 0xcd, 0xc3, 0x09, 0x1f, 0x1d, - 0xdd, 0x87, 0x0e, 0xf2, 0x2c, 0xf3, 0x53, 0x3c, 0x8a, 0x77, 0xe0, 0x92, 0x4c, 0xd5, 0xb5, 0xd2, - 0x5f, 0x59, 0x75, 0x0b, 0x46, 0x1b, 0xa5, 0xbf, 0xbb, 0xfa, 0x1e, 0xe8, 0x84, 0xb5, 0x25, 0x4c, - 0x4b, 0x34, 0xa4, 0x62, 0x7a, 0xe5, 0x6c, 0x57, 0x54, 0xdd, 0xbb, 0xea, 0x16, 0x8c, 0x35, 0xa2, - 0xee, 0x6a, 0xc5, 0xf4, 0x3d, 0xd1, 0xeb, 0x89, 0xe9, 0x57, 0xf2, 0xe8, 0x8f, 0xb7, 0x85, 0x62, - 0xfa, 0x99, 0x86, 0xd1, 0x11, 0x6c, 0x0a, 0xfa, 0x28, 0x6e, 0x24, 0xc6, 0x5b, 0xf0, 0x00, 0xf7, - 0x4a, 0x76, 0x0b, 0x45, 0xdb, 0xb1, 0x5b, 0x30, 0xd6, 0x49, 0x6e, 0x4f, 0x12, 0xc7, 0x1b, 0xb1, - 0xae, 0x2e, 0x4b, 0x17, 0xe2, 0x8d, 0x6a, 0x59, 0xef, 0x98, 0xd7, 0x03, 0xee, 0x16, 0x0c, 0x21, - 0x93, 0x2c, 0x2c, 0xd6, 0xf0, 0xe3, 0x58, 0xc3, 0x13, 0x2d, 0x01, 0xed, 0xfd, 0xc9, 0x1a, 0x7e, - 0x29, 0xa7, 0x6d, 0xc4, 0x2f, 0x16, 0xa8, 0xb5, 0xfa, 0x2a, 0x2c, 0x24, 0x6f, 0x2e, 0xac, 0xc6, - 0x1f, 0xf7, 0x95, 0xe3, 0x3b, 0x0e, 0xbf, 0x2d, 0x42, 0xf9, 0x11, 0x52, 0xdf, 0x8a, 0x98, 0xfe, - 0xb1, 0x5b, 0xc6, 0xb3, 0x95, 0xcf, 0xfc, 0x8d, 0xc8, 0x5c, 0x5f, 0x70, 0x5d, 0x81, 0x86, 0x8c, - 0x30, 0x39, 0xcf, 0xf7, 0x31, 0x2c, 0x7d, 0x30, 0x56, 0x6f, 0x7a, 0x8e, 0x1f, 0x93, 0xfc, 0xae, - 0x08, 0xe5, 0x0f, 0x1d, 0x5b, 0x29, 0xbd, 0x4b, 0xd0, 0xa4, 0xbf, 0x81, 0x87, 0x7a, 0xf2, 0x5e, - 0x49, 0x3c, 0x41, 0x93, 0x3f, 0xcf, 0xc7, 0x03, 0xeb, 0x54, 0x64, 0x79, 0x62, 0x44, 0x57, 0xa1, - 0x30, 0xf4, 0xad, 0xc3, 0x61, 0x88, 0xc5, 0x67, 0x7a, 0xf1, 0x04, 0x4d, 0x65, 0x9e, 0xfa, 0xc8, - 0xf3, 0x70, 0x5f, 0x1c, 0xc1, 0xe5, 0xf0, 0xcc, 0x7d, 0xcc, 0xdb, 0xaf, 0x42, 0x9b, 0xf8, 0x47, - 0x12, 0xd7, 0x3c, 0xd9, 0xb9, 0xbd, 0x28, 0xbe, 0x5d, 0xdd, 0xf7, 0x49, 0x48, 0xf6, 0x8b, 0xbf, - 0x28, 0x95, 0xf7, 0x76, 0x0f, 0x0e, 0x6b, 0xec, 0x63, 0xd0, 0x37, 0xff, 0x19, 0x00, 0x00, 0xff, - 0xff, 0xd4, 0x0a, 0xef, 0xca, 0xe4, 0x3a, 0x00, 0x00, +func (x *Xml) GetVendorExtension() []*NamedAny { + if x != nil { + return 
x.VendorExtension + } + return nil +} + +var File_openapiv2_OpenAPIv2_proto protoreflect.FileDescriptor + +var file_openapiv2_OpenAPIv2_proto_rawDesc = []byte{ + 0x0a, 0x19, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x4f, 0x70, 0x65, 0x6e, + 0x41, 0x50, 0x49, 0x76, 0x32, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x22, 0x6d, 0x0a, 0x18, 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, + 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x2c, + 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x48, 0x00, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1a, 0x0a, 0x07, + 0x62, 0x6f, 0x6f, 0x6c, 0x65, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, + 0x07, 0x62, 0x6f, 0x6f, 0x6c, 0x65, 0x61, 0x6e, 0x42, 0x07, 0x0a, 0x05, 0x6f, 0x6e, 0x65, 0x6f, + 0x66, 0x22, 0x45, 0x0a, 0x03, 0x41, 0x6e, 0x79, 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x79, 0x61, 0x6d, 0x6c, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x79, 0x61, 0x6d, 0x6c, 0x22, 0xab, 0x01, 0x0a, 0x0e, 0x41, 0x70, 0x69, + 0x4b, 0x65, 0x79, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x02, 0x69, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, + 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, + 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x94, 0x01, 0x0a, 0x1b, 0x42, 0x61, 0x73, 0x69, 0x63, + 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, + 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3f, 0x0a, 0x10, + 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 
0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, + 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xde, 0x01, + 0x0a, 0x0d, 0x42, 0x6f, 0x64, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, + 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, + 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, + 0x64, 0x12, 0x2a, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x12, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x53, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x3f, 0x0a, + 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, + 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x86, + 0x01, 0x0a, 0x07, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x10, + 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, + 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, + 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, + 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x54, 0x0a, 0x07, 0x44, 0x65, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x12, 0x49, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, + 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, + 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x5b, 0x0a, + 0x0b, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x4c, 0x0a, 0x15, + 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, + 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, + 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0xe8, 0x05, 0x0a, 0x08, 0x44, + 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x77, 0x61, 
0x67, 0x67, + 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x77, 0x61, 0x67, 0x67, 0x65, + 0x72, 0x12, 0x24, 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x10, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x49, 0x6e, 0x66, + 0x6f, 0x52, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x62, + 0x61, 0x73, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x62, 0x61, 0x73, 0x65, 0x50, 0x61, 0x74, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x63, 0x68, 0x65, + 0x6d, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, + 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x73, 0x18, 0x06, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x73, 0x12, 0x1a, + 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x08, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x12, 0x27, 0x0a, 0x05, 0x70, 0x61, + 0x74, 0x68, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x73, 0x52, 0x05, 0x70, 0x61, + 0x74, 0x68, 0x73, 0x12, 0x39, 0x0a, 0x0b, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x52, 0x0b, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x40, + 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x0a, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, + 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, + 0x12, 0x3d, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x18, 0x0b, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x12, + 0x3b, 0x0a, 0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x0c, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x1f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x53, + 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x52, 0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x52, 0x0a, 0x14, + 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, + 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x13, 0x73, 0x65, 0x63, + 0x75, 0x72, 0x69, 0x74, 0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x23, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, + 0x2e, 0x6f, 
0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x61, 0x67, 0x52, + 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, + 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, + 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, + 0x44, 0x6f, 0x63, 0x73, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x10, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, + 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x55, 0x0a, 0x08, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, + 0x73, 0x12, 0x49, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, + 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, + 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x83, 0x01, 0x0a, + 0x0c, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x12, 0x20, 0x0a, + 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, + 0x6c, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, + 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x22, 0xff, 0x02, 0x0a, 0x0a, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x69, 0x74, + 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x12, + 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x29, 0x0a, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, + 0x41, 0x6e, 0x79, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x1a, 0x0a, 0x08, + 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, + 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x09, + 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 
0x6e, 0x6c, 0x79, 0x12, 0x3d, 0x0a, 0x0d, 0x65, 0x78, 0x74, + 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x18, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x78, + 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x12, 0x29, 0x0a, 0x07, 0x65, 0x78, 0x61, 0x6d, + 0x70, 0x6c, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x65, 0x78, 0x61, 0x6d, + 0x70, 0x6c, 0x65, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, + 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xab, 0x06, 0x0a, 0x1a, 0x46, 0x6f, 0x72, 0x6d, 0x44, 0x61, 0x74, + 0x61, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, + 0x0e, 0x0a, 0x02, 0x69, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x6e, 0x12, + 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2a, 0x0a, 0x11, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x65, + 0x6d, 0x70, 0x74, 0x79, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0f, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x31, 0x0a, + 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, + 0x69, 0x76, 0x65, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, + 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x66, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x63, 0x6f, 0x6c, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x29, 0x0a, + 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, + 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x61, 0x78, 0x69, + 0x6d, 0x75, 0x6d, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, + 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, + 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0c, 0x20, 
0x01, 0x28, 0x08, 0x52, 0x10, 0x65, + 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12, + 0x18, 0x0a, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x01, + 0x52, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, + 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0e, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, + 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x6c, 0x65, + 0x6e, 0x67, 0x74, 0x68, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x4c, + 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x69, 0x6e, 0x5f, 0x6c, 0x65, 0x6e, + 0x67, 0x74, 0x68, 0x18, 0x10, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x69, 0x6e, 0x4c, 0x65, + 0x6e, 0x67, 0x74, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, + 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x1b, + 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6d, + 0x69, 0x6e, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, + 0x6d, 0x69, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x6e, 0x69, 0x71, + 0x75, 0x65, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, + 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x23, 0x0a, 0x04, 0x65, + 0x6e, 0x75, 0x6d, 0x18, 0x15, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x04, 0x65, 0x6e, 0x75, 0x6d, + 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x6f, 0x66, 0x18, + 0x16, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x4f, + 0x66, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x17, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, + 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x22, 0xab, 0x05, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x12, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, + 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x31, 0x0a, 0x05, 0x69, 0x74, 0x65, + 0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x73, + 0x49, 0x74, 0x65, 0x6d, 0x73, 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x2b, 0x0a, 0x11, + 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, + 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x29, 0x0a, 0x07, 0x64, 0x65, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 
0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x64, 0x65, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, + 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x61, 0x78, 0x69, + 0x6d, 0x75, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, + 0x73, 0x69, 0x76, 0x65, 0x4d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x18, 0x0a, 0x07, 0x6d, + 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x08, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x69, + 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, + 0x76, 0x65, 0x5f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x69, 0x6e, 0x69, 0x6d, + 0x75, 0x6d, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, + 0x18, 0x0a, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x4c, 0x65, 0x6e, 0x67, 0x74, + 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x69, 0x6e, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, + 0x0b, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x69, 0x6e, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, + 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, 0x0c, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, + 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, + 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x69, 0x6e, 0x5f, 0x69, + 0x74, 0x65, 0x6d, 0x73, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x69, 0x6e, 0x49, + 0x74, 0x65, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x5f, 0x69, + 0x74, 0x65, 0x6d, 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x75, 0x6e, 0x69, 0x71, + 0x75, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x23, 0x0a, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x18, + 0x10, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x12, 0x1f, 0x0a, 0x0b, + 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x6f, 0x66, 0x18, 0x11, 0x20, 0x01, 0x28, + 0x01, 0x52, 0x0a, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x4f, 0x66, 0x12, 0x20, 0x0a, + 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x12, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x18, 0x13, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, + 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x22, 0xfd, 0x05, 0x0a, 0x18, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, 0x61, 0x6d, + 0x65, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1a, 0x0a, + 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x6e, 0x18, + 0x02, 0x20, 
0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x31, 0x0a, 0x05, 0x69, + 0x74, 0x65, 0x6d, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, + 0x65, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x2b, + 0x0a, 0x11, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x63, 0x6f, 0x6c, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x29, 0x0a, 0x07, 0x64, + 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x64, + 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, + 0x6d, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, + 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x61, + 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, + 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x18, 0x0a, + 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, + 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, + 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0d, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x69, 0x6e, + 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x6c, 0x65, 0x6e, 0x67, + 0x74, 0x68, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x4c, 0x65, 0x6e, + 0x67, 0x74, 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x69, 0x6e, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, + 0x68, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x69, 0x6e, 0x4c, 0x65, 0x6e, 0x67, + 0x74, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, 0x10, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x1b, 0x0a, 0x09, + 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x08, 0x6d, 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x69, 0x6e, + 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x69, + 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, + 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x75, 0x6e, + 0x69, 0x71, 0x75, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x23, 0x0a, 0x04, 0x65, 0x6e, 0x75, + 0x6d, 0x18, 0x14, 0x20, 0x03, 0x28, 
0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x12, 0x1f, + 0x0a, 0x0b, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x6f, 0x66, 0x18, 0x15, 0x20, + 0x01, 0x28, 0x01, 0x52, 0x0a, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x4f, 0x66, 0x12, + 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x18, 0x16, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, + 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x22, 0x57, 0x0a, 0x07, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x4c, 0x0a, 0x15, 0x61, + 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, + 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, + 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0xa1, 0x02, 0x0a, 0x04, 0x49, 0x6e, + 0x66, 0x6f, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x10, 0x74, 0x65, 0x72, 0x6d, 0x73, 0x5f, 0x6f, 0x66, + 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, + 0x74, 0x65, 0x72, 0x6d, 0x73, 0x4f, 0x66, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x2d, + 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x13, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, + 0x74, 0x61, 0x63, 0x74, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x12, 0x2d, 0x0a, + 0x07, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x63, 0x65, + 0x6e, 0x73, 0x65, 0x52, 0x07, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a, 0x10, + 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, + 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x37, 0x0a, + 0x09, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x2a, 0x0a, 0x06, 0x73, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x06, + 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x44, 0x0a, 0x0d, 0x4a, 0x73, 0x6f, 0x6e, 0x52, 0x65, + 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x11, 0x0a, 
0x04, 0x5f, 0x72, 0x65, 0x66, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x52, 0x65, 0x66, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x70, 0x0a, 0x07, + 0x4c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x75, + 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x3f, 0x0a, + 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, + 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x45, + 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x25, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x4b, 0x0a, 0x0b, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x48, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x22, 0x51, 0x0a, 0x0e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x50, 0x61, 0x72, 0x61, 0x6d, + 0x65, 0x74, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2b, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x4f, 0x0a, 0x0d, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x50, 0x61, + 0x74, 0x68, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x49, 0x74, 0x65, 0x6d, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x4f, 0x0a, 0x0d, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2a, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x59, 0x0a, 0x12, 0x4e, 0x61, 0x6d, 0x65, 0x64, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 
0x12, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x2f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x22, 0x4b, 0x0a, 0x0b, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x32, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, + 0x6d, 0x0a, 0x1c, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, + 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, + 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x37, + 0x0a, 0x0b, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x12, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x55, 0x0a, 0x10, 0x4e, 0x61, 0x6d, 0x65, 0x64, + 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x41, 0x72, 0x72, 0x61, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x2d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x72, 0x69, + 0x6e, 0x67, 0x41, 0x72, 0x72, 0x61, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xb5, + 0x03, 0x0a, 0x10, 0x4e, 0x6f, 0x6e, 0x42, 0x6f, 0x64, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, + 0x74, 0x65, 0x72, 0x12, 0x65, 0x0a, 0x1b, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x70, 0x61, + 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x75, 0x62, 0x5f, 0x73, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, 0x61, + 0x6d, 0x65, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x48, 0x00, + 0x52, 0x18, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, + 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x6c, 0x0a, 0x1e, 0x66, 0x6f, + 0x72, 0x6d, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, + 0x72, 0x5f, 0x73, 0x75, 0x62, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, + 0x46, 0x6f, 
0x72, 0x6d, 0x44, 0x61, 0x74, 0x61, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, + 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x48, 0x00, 0x52, 0x1a, 0x66, 0x6f, + 0x72, 0x6d, 0x44, 0x61, 0x74, 0x61, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x53, + 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x62, 0x0a, 0x1a, 0x71, 0x75, 0x65, 0x72, + 0x79, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x75, 0x62, 0x5f, + 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, + 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x48, 0x00, 0x52, 0x17, 0x71, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, + 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x5f, 0x0a, 0x19, + 0x70, 0x61, 0x74, 0x68, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x5f, 0x73, + 0x75, 0x62, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x22, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x61, 0x74, + 0x68, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x48, 0x00, 0x52, 0x16, 0x70, 0x61, 0x74, 0x68, 0x50, 0x61, 0x72, 0x61, 0x6d, + 0x65, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x42, 0x07, 0x0a, + 0x05, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0xa1, 0x02, 0x0a, 0x18, 0x4f, 0x61, 0x75, 0x74, 0x68, + 0x32, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x63, 0x75, 0x72, + 0x69, 0x74, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x30, 0x0a, 0x06, 0x73, + 0x63, 0x6f, 0x70, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x53, + 0x63, 0x6f, 0x70, 0x65, 0x73, 0x52, 0x06, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x2b, 0x0a, + 0x11, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, + 0x72, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, + 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x6f, + 0x6b, 0x65, 0x6e, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, + 0x6f, 0x6b, 0x65, 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, + 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, + 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xf5, 0x01, 0x0a, 0x19, 0x4f, + 0x61, 0x75, 0x74, 0x68, 0x32, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 
0x74, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, + 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x66, 0x6c, 0x6f, 0x77, + 0x12, 0x30, 0x0a, 0x06, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x18, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x61, + 0x75, 0x74, 0x68, 0x32, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x52, 0x06, 0x73, 0x63, 0x6f, 0x70, + 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x75, 0x72, 0x6c, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x55, 0x72, 0x6c, 0x12, + 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, + 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x22, 0x82, 0x02, 0x0a, 0x16, 0x4f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x49, 0x6d, 0x70, + 0x6c, 0x69, 0x63, 0x69, 0x74, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x12, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, + 0x65, 0x12, 0x12, 0x0a, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x30, 0x0a, 0x06, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x32, 0x2e, 0x4f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x52, + 0x06, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x61, 0x75, 0x74, 0x68, 0x6f, + 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x10, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, + 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, + 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xf2, 0x01, 0x0a, 0x16, 0x4f, 0x61, 0x75, 0x74, + 0x68, 0x32, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, + 0x74, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x30, 0x0a, 0x06, 0x73, 0x63, + 0x6f, 0x70, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x61, 
0x75, 0x74, 0x68, 0x32, 0x53, 0x63, + 0x6f, 0x70, 0x65, 0x73, 0x52, 0x06, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x09, + 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3f, 0x0a, 0x10, 0x76, + 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, + 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x5c, 0x0a, 0x0c, + 0x4f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x4c, 0x0a, 0x15, + 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, + 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x74, + 0x72, 0x69, 0x6e, 0x67, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, + 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x9e, 0x04, 0x0a, 0x09, 0x4f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, 0x18, 0x0a, 0x07, + 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, + 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0d, 0x65, 0x78, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x18, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x78, 0x74, + 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x70, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, + 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, + 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, + 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, + 0x65, 0x73, 0x12, 0x3a, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, + 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x49, 0x74, + 0x65, 0x6d, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x33, + 0x0a, 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 
0x2e, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x52, 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, 0x18, 0x0a, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, 0x12, 0x1e, 0x0a, + 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x3b, 0x0a, + 0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x1f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x63, + 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x52, 0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, + 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0d, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, + 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xa6, 0x01, 0x0a, 0x09, + 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x42, 0x0a, 0x0e, 0x62, 0x6f, 0x64, + 0x79, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x42, + 0x6f, 0x64, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0d, + 0x62, 0x6f, 0x64, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x4c, 0x0a, + 0x12, 0x6e, 0x6f, 0x6e, 0x5f, 0x62, 0x6f, 0x64, 0x79, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, + 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x6e, 0x42, 0x6f, 0x64, 0x79, 0x50, 0x61, + 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x10, 0x6e, 0x6f, 0x6e, 0x42, 0x6f, + 0x64, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x42, 0x07, 0x0a, 0x05, 0x6f, + 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x67, 0x0a, 0x14, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, + 0x72, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x4f, 0x0a, 0x15, + 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, + 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x50, 0x61, + 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x94, 0x01, + 0x0a, 0x0e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x49, 0x74, 0x65, 0x6d, + 0x12, 0x35, 0x0a, 0x09, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x09, 0x70, 0x61, + 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x42, 0x0a, 0x0e, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, + 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x19, 0x2e, 
0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4a, 0x73, 0x6f, + 0x6e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x48, 0x00, 0x52, 0x0d, 0x6a, 0x73, + 0x6f, 0x6e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x07, 0x0a, 0x05, 0x6f, + 0x6e, 0x65, 0x6f, 0x66, 0x22, 0xcf, 0x03, 0x0a, 0x08, 0x50, 0x61, 0x74, 0x68, 0x49, 0x74, 0x65, + 0x6d, 0x12, 0x11, 0x0a, 0x04, 0x5f, 0x72, 0x65, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x52, 0x65, 0x66, 0x12, 0x27, 0x0a, 0x03, 0x67, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x03, 0x67, 0x65, 0x74, 0x12, 0x27, 0x0a, + 0x03, 0x70, 0x75, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x03, 0x70, 0x75, 0x74, 0x12, 0x29, 0x0a, 0x04, 0x70, 0x6f, 0x73, 0x74, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x32, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x04, 0x70, 0x6f, 0x73, + 0x74, 0x12, 0x2d, 0x0a, 0x06, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x12, 0x2f, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x29, 0x0a, 0x04, 0x68, 0x65, 0x61, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x70, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x04, 0x68, 0x65, 0x61, 0x64, 0x12, 0x2b, 0x0a, 0x05, + 0x70, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x12, 0x3a, 0x0a, 0x0a, 0x70, 0x61, 0x72, + 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, + 0x65, 0x74, 0x65, 0x72, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, + 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, + 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, + 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xfb, 0x05, 0x0a, 0x16, 0x50, 0x61, 0x74, 0x68, 0x50, + 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x0e, 0x0a, + 0x02, 0x69, 0x6e, 0x18, 0x02, 0x20, 
0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x20, 0x0a, + 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, + 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, + 0x31, 0x0a, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x69, 0x6d, + 0x69, 0x74, 0x69, 0x76, 0x65, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x52, 0x05, 0x69, 0x74, 0x65, + 0x6d, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x63, + 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, + 0x29, 0x0a, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, + 0x79, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x61, + 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x61, 0x78, + 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, + 0x65, 0x5f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x61, 0x78, 0x69, 0x6d, 0x75, + 0x6d, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0c, 0x20, 0x01, + 0x28, 0x01, 0x52, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, + 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, + 0x18, 0x0d, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, + 0x65, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, + 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x61, + 0x78, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x69, 0x6e, 0x5f, 0x6c, + 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x69, 0x6e, + 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, + 0x6e, 0x18, 0x10, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, + 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x11, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1b, 0x0a, + 0x09, 0x6d, 0x69, 0x6e, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x08, 0x6d, 0x69, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x6e, + 0x69, 0x71, 0x75, 0x65, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0b, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x23, 0x0a, + 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x14, 0x20, 0x03, 0x28, 
0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x04, 0x65, 0x6e, + 0x75, 0x6d, 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x6f, + 0x66, 0x18, 0x15, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, + 0x65, 0x4f, 0x66, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x16, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, + 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x77, 0x0a, 0x05, 0x50, 0x61, 0x74, 0x68, 0x73, 0x12, 0x3f, 0x0a, + 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, + 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2d, + 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x50, + 0x61, 0x74, 0x68, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x22, 0x92, 0x05, + 0x0a, 0x0f, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x73, 0x49, 0x74, 0x65, 0x6d, + 0x73, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x31, 0x0a, + 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, + 0x69, 0x76, 0x65, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, + 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x66, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x63, 0x6f, 0x6c, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x29, 0x0a, + 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, + 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x61, 0x78, 0x69, + 0x6d, 0x75, 0x6d, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, + 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, + 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, + 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12, + 0x18, 0x0a, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x08, 0x20, 0x01, 0x28, 0x01, + 0x52, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, + 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x09, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 
0x65, 0x4d, + 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x6c, 0x65, + 0x6e, 0x67, 0x74, 0x68, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x4c, + 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x69, 0x6e, 0x5f, 0x6c, 0x65, 0x6e, + 0x67, 0x74, 0x68, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x69, 0x6e, 0x4c, 0x65, + 0x6e, 0x67, 0x74, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, + 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x1b, + 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6d, + 0x69, 0x6e, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, + 0x6d, 0x69, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x6e, 0x69, 0x71, + 0x75, 0x65, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, + 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x23, 0x0a, 0x04, 0x65, + 0x6e, 0x75, 0x6d, 0x18, 0x10, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x04, 0x65, 0x6e, 0x75, 0x6d, + 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x6f, 0x66, 0x18, + 0x11, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x4f, + 0x66, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x12, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, + 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x22, 0x5a, 0x0a, 0x0a, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, + 0x12, 0x4c, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, + 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x17, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, + 0x65, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0xa8, + 0x06, 0x0a, 0x17, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, + 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, + 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, + 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x6e, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2a, 0x0a, 0x11, + 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x45, 0x6d, + 0x70, 0x74, 
0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, + 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, + 0x72, 0x6d, 0x61, 0x74, 0x12, 0x31, 0x0a, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x73, + 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x6f, 0x6c, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x09, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x10, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6f, + 0x72, 0x6d, 0x61, 0x74, 0x12, 0x29, 0x0a, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, + 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, + 0x18, 0x0a, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x01, + 0x52, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, + 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0c, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, + 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, + 0x6d, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, + 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x69, + 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, + 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x1d, 0x0a, + 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x0f, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x1d, 0x0a, 0x0a, + 0x6d, 0x69, 0x6e, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x10, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x09, 0x6d, 0x69, 0x6e, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x70, + 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, + 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, + 0x6d, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x49, 0x74, 0x65, + 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x69, 0x6e, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, + 0x13, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x69, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, + 0x21, 0x0a, 0x0c, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, + 0x14, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x49, 0x74, 0x65, + 0x6d, 0x73, 0x12, 0x23, 0x0a, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x15, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, + 0x79, 0x52, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x75, 0x6c, 0x74, 0x69, + 0x70, 0x6c, 0x65, 0x5f, 0x6f, 0x66, 0x18, 0x16, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x6d, 0x75, + 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 
0x4f, 0x66, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, + 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x17, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, + 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, + 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xfe, 0x01, 0x0a, 0x08, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2e, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x49, 0x74, 0x65, 0x6d, + 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x2d, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x52, 0x07, + 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x30, 0x0a, 0x08, 0x65, 0x78, 0x61, 0x6d, 0x70, + 0x6c, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, 0x52, + 0x08, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, + 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, + 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x65, 0x0a, 0x13, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x4e, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, + 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, + 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x14, 0x61, 0x64, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, + 0x73, 0x22, 0x90, 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x32, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x08, 0x72, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x42, 0x0a, 0x0e, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, + 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x19, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4a, 0x73, 0x6f, + 0x6e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x48, 0x00, 0x52, 0x0d, 0x6a, 0x73, + 0x6f, 0x6e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x07, 0x0a, 0x05, 0x6f, + 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x91, 0x01, 0x0a, 0x09, 0x52, 
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x73, 0x12, 0x43, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x63, + 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, + 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, + 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xaf, 0x09, 0x0a, 0x06, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x12, 0x11, 0x0a, 0x04, 0x5f, 0x72, 0x65, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x52, 0x65, 0x66, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x14, + 0x0a, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, + 0x69, 0x74, 0x6c, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x29, 0x0a, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, + 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, + 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x6f, 0x66, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, + 0x4f, 0x66, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, + 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, + 0x6d, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, + 0x76, 0x65, 0x4d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x69, 0x6e, + 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x09, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x69, 0x6e, 0x69, + 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, + 0x5f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, + 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, + 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x0b, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, + 0x1d, 0x0a, 0x0a, 0x6d, 0x69, 0x6e, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x0c, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x69, 0x6e, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x18, + 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, + 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 
0x61, 0x78, + 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x69, 0x6e, 0x5f, 0x69, 0x74, 0x65, + 0x6d, 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x69, 0x6e, 0x49, 0x74, 0x65, + 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x5f, 0x69, 0x74, 0x65, + 0x6d, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, + 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x72, 0x6f, + 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x6d, + 0x61, 0x78, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, + 0x6d, 0x69, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x12, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, + 0x69, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, + 0x13, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, + 0x23, 0x0a, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x04, + 0x65, 0x6e, 0x75, 0x6d, 0x12, 0x59, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x15, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, + 0x72, 0x74, 0x69, 0x65, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, + 0x28, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x49, + 0x74, 0x65, 0x6d, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x2b, 0x0a, 0x05, 0x69, 0x74, 0x65, + 0x6d, 0x73, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x52, + 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x29, 0x0a, 0x06, 0x61, 0x6c, 0x6c, 0x5f, 0x6f, 0x66, + 0x18, 0x18, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x05, 0x61, 0x6c, 0x6c, 0x4f, + 0x66, 0x12, 0x36, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, + 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x32, 0x2e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x52, 0x0a, 0x70, + 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x24, 0x0a, 0x0d, 0x64, 0x69, 0x73, + 0x63, 0x72, 0x69, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0d, 0x64, 0x69, 0x73, 0x63, 0x72, 0x69, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x12, + 0x1b, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x1b, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x21, 0x0a, 0x03, + 0x78, 0x6d, 0x6c, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 
0x69, 0x2e, 0x76, 0x32, 0x2e, 0x58, 0x6d, 0x6c, 0x52, 0x03, 0x78, 0x6d, 0x6c, 0x12, + 0x3d, 0x0a, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, + 0x18, 0x1d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, + 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x12, 0x29, + 0x0a, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x18, 0x1e, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, + 0x52, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, + 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x1f, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, + 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x7e, 0x0a, 0x0a, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x2c, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x48, 0x00, 0x52, 0x06, + 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x39, 0x0a, 0x0b, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x73, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x48, 0x00, 0x52, 0x0a, 0x66, 0x69, 0x6c, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x42, 0x07, 0x0a, 0x05, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x74, 0x0a, 0x13, 0x53, 0x65, + 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x5d, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, + 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x28, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, + 0x6d, 0x65, 0x64, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, + 0x22, 0xe9, 0x04, 0x0a, 0x17, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x44, 0x65, 0x66, + 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x6d, 0x0a, 0x1d, + 0x62, 0x61, 0x73, 0x69, 0x63, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x42, 0x61, 0x73, 0x69, 0x63, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x48, 0x00, 0x52, 0x1b, + 0x62, 0x61, 0x73, 0x69, 0x63, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x46, 0x0a, 0x10, 0x61, + 0x70, 0x69, 0x5f, 0x6b, 0x65, 0x79, 
0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x32, 0x2e, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, + 0x79, 0x48, 0x00, 0x52, 0x0e, 0x61, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x53, 0x65, 0x63, 0x75, 0x72, + 0x69, 0x74, 0x79, 0x12, 0x5e, 0x0a, 0x18, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x5f, 0x69, 0x6d, + 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x32, 0x2e, 0x4f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x49, 0x6d, 0x70, 0x6c, 0x69, 0x63, 0x69, + 0x74, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x48, 0x00, 0x52, 0x16, 0x6f, 0x61, 0x75, + 0x74, 0x68, 0x32, 0x49, 0x6d, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x53, 0x65, 0x63, 0x75, 0x72, + 0x69, 0x74, 0x79, 0x12, 0x5e, 0x0a, 0x18, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x5f, 0x70, 0x61, + 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x32, 0x2e, 0x4f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, + 0x64, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x48, 0x00, 0x52, 0x16, 0x6f, 0x61, 0x75, + 0x74, 0x68, 0x32, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x53, 0x65, 0x63, 0x75, 0x72, + 0x69, 0x74, 0x79, 0x12, 0x67, 0x0a, 0x1b, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x5f, 0x61, 0x70, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, + 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x41, 0x70, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x48, + 0x00, 0x52, 0x19, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x65, 0x0a, 0x1b, + 0x6f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x63, 0x6f, + 0x64, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x24, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, + 0x61, 0x75, 0x74, 0x68, 0x32, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x53, + 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x48, 0x00, 0x52, 0x18, 0x6f, 0x61, 0x75, 0x74, 0x68, + 0x32, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x63, 0x75, 0x72, + 0x69, 0x74, 0x79, 0x42, 0x07, 0x0a, 0x05, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x68, 0x0a, 0x13, + 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x12, 0x51, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, + 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, + 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x41, 0x72, 0x72, 0x61, 0x79, + 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, + 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x23, 0x0a, 0x0b, 
0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, + 0x41, 0x72, 0x72, 0x61, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xbb, 0x01, 0x0a, 0x03, + 0x54, 0x61, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0d, 0x65, 0x78, 0x74, + 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x18, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x78, + 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, + 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, + 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, + 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x20, 0x0a, 0x08, 0x54, 0x79, 0x70, + 0x65, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x5c, 0x0a, 0x0f, 0x56, + 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x49, + 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, + 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, + 0x41, 0x6e, 0x79, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, + 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0xc8, 0x01, 0x0a, 0x03, 0x58, 0x6d, + 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x1c, 0x0a, 0x09, 0x61, + 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, + 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x77, 0x72, 0x61, + 0x70, 0x70, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x77, 0x72, 0x61, 0x70, + 0x70, 0x65, 0x64, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, + 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x3c, 0x0a, 0x0e, 0x6f, 0x72, 0x67, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0x42, 0x0c, 0x4f, 0x70, 0x65, 0x6e, 0x41, 0x50, 
0x49, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x14, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, + 0x32, 0x3b, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0xa2, 0x02, 0x03, 0x4f, + 0x41, 0x53, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_openapiv2_OpenAPIv2_proto_rawDescOnce sync.Once + file_openapiv2_OpenAPIv2_proto_rawDescData = file_openapiv2_OpenAPIv2_proto_rawDesc +) + +func file_openapiv2_OpenAPIv2_proto_rawDescGZIP() []byte { + file_openapiv2_OpenAPIv2_proto_rawDescOnce.Do(func() { + file_openapiv2_OpenAPIv2_proto_rawDescData = protoimpl.X.CompressGZIP(file_openapiv2_OpenAPIv2_proto_rawDescData) + }) + return file_openapiv2_OpenAPIv2_proto_rawDescData +} + +var file_openapiv2_OpenAPIv2_proto_msgTypes = make([]protoimpl.MessageInfo, 60) +var file_openapiv2_OpenAPIv2_proto_goTypes = []interface{}{ + (*AdditionalPropertiesItem)(nil), // 0: openapi.v2.AdditionalPropertiesItem + (*Any)(nil), // 1: openapi.v2.Any + (*ApiKeySecurity)(nil), // 2: openapi.v2.ApiKeySecurity + (*BasicAuthenticationSecurity)(nil), // 3: openapi.v2.BasicAuthenticationSecurity + (*BodyParameter)(nil), // 4: openapi.v2.BodyParameter + (*Contact)(nil), // 5: openapi.v2.Contact + (*Default)(nil), // 6: openapi.v2.Default + (*Definitions)(nil), // 7: openapi.v2.Definitions + (*Document)(nil), // 8: openapi.v2.Document + (*Examples)(nil), // 9: openapi.v2.Examples + (*ExternalDocs)(nil), // 10: openapi.v2.ExternalDocs + (*FileSchema)(nil), // 11: openapi.v2.FileSchema + (*FormDataParameterSubSchema)(nil), // 12: openapi.v2.FormDataParameterSubSchema + (*Header)(nil), // 13: openapi.v2.Header + (*HeaderParameterSubSchema)(nil), // 14: openapi.v2.HeaderParameterSubSchema + (*Headers)(nil), // 15: openapi.v2.Headers + (*Info)(nil), // 16: openapi.v2.Info + (*ItemsItem)(nil), // 17: openapi.v2.ItemsItem + (*JsonReference)(nil), // 18: openapi.v2.JsonReference + (*License)(nil), // 19: openapi.v2.License + (*NamedAny)(nil), // 20: openapi.v2.NamedAny + (*NamedHeader)(nil), // 21: openapi.v2.NamedHeader + (*NamedParameter)(nil), // 22: openapi.v2.NamedParameter + (*NamedPathItem)(nil), // 23: openapi.v2.NamedPathItem + (*NamedResponse)(nil), // 24: openapi.v2.NamedResponse + (*NamedResponseValue)(nil), // 25: openapi.v2.NamedResponseValue + (*NamedSchema)(nil), // 26: openapi.v2.NamedSchema + (*NamedSecurityDefinitionsItem)(nil), // 27: openapi.v2.NamedSecurityDefinitionsItem + (*NamedString)(nil), // 28: openapi.v2.NamedString + (*NamedStringArray)(nil), // 29: openapi.v2.NamedStringArray + (*NonBodyParameter)(nil), // 30: openapi.v2.NonBodyParameter + (*Oauth2AccessCodeSecurity)(nil), // 31: openapi.v2.Oauth2AccessCodeSecurity + (*Oauth2ApplicationSecurity)(nil), // 32: openapi.v2.Oauth2ApplicationSecurity + (*Oauth2ImplicitSecurity)(nil), // 33: openapi.v2.Oauth2ImplicitSecurity + (*Oauth2PasswordSecurity)(nil), // 34: openapi.v2.Oauth2PasswordSecurity + (*Oauth2Scopes)(nil), // 35: openapi.v2.Oauth2Scopes + (*Operation)(nil), // 36: openapi.v2.Operation + (*Parameter)(nil), // 37: openapi.v2.Parameter + (*ParameterDefinitions)(nil), // 38: openapi.v2.ParameterDefinitions + (*ParametersItem)(nil), // 39: openapi.v2.ParametersItem + (*PathItem)(nil), // 40: openapi.v2.PathItem + (*PathParameterSubSchema)(nil), // 41: openapi.v2.PathParameterSubSchema + (*Paths)(nil), // 42: openapi.v2.Paths + (*PrimitivesItems)(nil), // 43: openapi.v2.PrimitivesItems + (*Properties)(nil), // 44: openapi.v2.Properties + (*QueryParameterSubSchema)(nil), // 45: 
openapi.v2.QueryParameterSubSchema + (*Response)(nil), // 46: openapi.v2.Response + (*ResponseDefinitions)(nil), // 47: openapi.v2.ResponseDefinitions + (*ResponseValue)(nil), // 48: openapi.v2.ResponseValue + (*Responses)(nil), // 49: openapi.v2.Responses + (*Schema)(nil), // 50: openapi.v2.Schema + (*SchemaItem)(nil), // 51: openapi.v2.SchemaItem + (*SecurityDefinitions)(nil), // 52: openapi.v2.SecurityDefinitions + (*SecurityDefinitionsItem)(nil), // 53: openapi.v2.SecurityDefinitionsItem + (*SecurityRequirement)(nil), // 54: openapi.v2.SecurityRequirement + (*StringArray)(nil), // 55: openapi.v2.StringArray + (*Tag)(nil), // 56: openapi.v2.Tag + (*TypeItem)(nil), // 57: openapi.v2.TypeItem + (*VendorExtension)(nil), // 58: openapi.v2.VendorExtension + (*Xml)(nil), // 59: openapi.v2.Xml + (*any.Any)(nil), // 60: google.protobuf.Any +} +var file_openapiv2_OpenAPIv2_proto_depIdxs = []int32{ + 50, // 0: openapi.v2.AdditionalPropertiesItem.schema:type_name -> openapi.v2.Schema + 60, // 1: openapi.v2.Any.value:type_name -> google.protobuf.Any + 20, // 2: openapi.v2.ApiKeySecurity.vendor_extension:type_name -> openapi.v2.NamedAny + 20, // 3: openapi.v2.BasicAuthenticationSecurity.vendor_extension:type_name -> openapi.v2.NamedAny + 50, // 4: openapi.v2.BodyParameter.schema:type_name -> openapi.v2.Schema + 20, // 5: openapi.v2.BodyParameter.vendor_extension:type_name -> openapi.v2.NamedAny + 20, // 6: openapi.v2.Contact.vendor_extension:type_name -> openapi.v2.NamedAny + 20, // 7: openapi.v2.Default.additional_properties:type_name -> openapi.v2.NamedAny + 26, // 8: openapi.v2.Definitions.additional_properties:type_name -> openapi.v2.NamedSchema + 16, // 9: openapi.v2.Document.info:type_name -> openapi.v2.Info + 42, // 10: openapi.v2.Document.paths:type_name -> openapi.v2.Paths + 7, // 11: openapi.v2.Document.definitions:type_name -> openapi.v2.Definitions + 38, // 12: openapi.v2.Document.parameters:type_name -> openapi.v2.ParameterDefinitions + 47, // 13: openapi.v2.Document.responses:type_name -> openapi.v2.ResponseDefinitions + 54, // 14: openapi.v2.Document.security:type_name -> openapi.v2.SecurityRequirement + 52, // 15: openapi.v2.Document.security_definitions:type_name -> openapi.v2.SecurityDefinitions + 56, // 16: openapi.v2.Document.tags:type_name -> openapi.v2.Tag + 10, // 17: openapi.v2.Document.external_docs:type_name -> openapi.v2.ExternalDocs + 20, // 18: openapi.v2.Document.vendor_extension:type_name -> openapi.v2.NamedAny + 20, // 19: openapi.v2.Examples.additional_properties:type_name -> openapi.v2.NamedAny + 20, // 20: openapi.v2.ExternalDocs.vendor_extension:type_name -> openapi.v2.NamedAny + 1, // 21: openapi.v2.FileSchema.default:type_name -> openapi.v2.Any + 10, // 22: openapi.v2.FileSchema.external_docs:type_name -> openapi.v2.ExternalDocs + 1, // 23: openapi.v2.FileSchema.example:type_name -> openapi.v2.Any + 20, // 24: openapi.v2.FileSchema.vendor_extension:type_name -> openapi.v2.NamedAny + 43, // 25: openapi.v2.FormDataParameterSubSchema.items:type_name -> openapi.v2.PrimitivesItems + 1, // 26: openapi.v2.FormDataParameterSubSchema.default:type_name -> openapi.v2.Any + 1, // 27: openapi.v2.FormDataParameterSubSchema.enum:type_name -> openapi.v2.Any + 20, // 28: openapi.v2.FormDataParameterSubSchema.vendor_extension:type_name -> openapi.v2.NamedAny + 43, // 29: openapi.v2.Header.items:type_name -> openapi.v2.PrimitivesItems + 1, // 30: openapi.v2.Header.default:type_name -> openapi.v2.Any + 1, // 31: openapi.v2.Header.enum:type_name -> openapi.v2.Any + 20, // 32: 
openapi.v2.Header.vendor_extension:type_name -> openapi.v2.NamedAny + 43, // 33: openapi.v2.HeaderParameterSubSchema.items:type_name -> openapi.v2.PrimitivesItems + 1, // 34: openapi.v2.HeaderParameterSubSchema.default:type_name -> openapi.v2.Any + 1, // 35: openapi.v2.HeaderParameterSubSchema.enum:type_name -> openapi.v2.Any + 20, // 36: openapi.v2.HeaderParameterSubSchema.vendor_extension:type_name -> openapi.v2.NamedAny + 21, // 37: openapi.v2.Headers.additional_properties:type_name -> openapi.v2.NamedHeader + 5, // 38: openapi.v2.Info.contact:type_name -> openapi.v2.Contact + 19, // 39: openapi.v2.Info.license:type_name -> openapi.v2.License + 20, // 40: openapi.v2.Info.vendor_extension:type_name -> openapi.v2.NamedAny + 50, // 41: openapi.v2.ItemsItem.schema:type_name -> openapi.v2.Schema + 20, // 42: openapi.v2.License.vendor_extension:type_name -> openapi.v2.NamedAny + 1, // 43: openapi.v2.NamedAny.value:type_name -> openapi.v2.Any + 13, // 44: openapi.v2.NamedHeader.value:type_name -> openapi.v2.Header + 37, // 45: openapi.v2.NamedParameter.value:type_name -> openapi.v2.Parameter + 40, // 46: openapi.v2.NamedPathItem.value:type_name -> openapi.v2.PathItem + 46, // 47: openapi.v2.NamedResponse.value:type_name -> openapi.v2.Response + 48, // 48: openapi.v2.NamedResponseValue.value:type_name -> openapi.v2.ResponseValue + 50, // 49: openapi.v2.NamedSchema.value:type_name -> openapi.v2.Schema + 53, // 50: openapi.v2.NamedSecurityDefinitionsItem.value:type_name -> openapi.v2.SecurityDefinitionsItem + 55, // 51: openapi.v2.NamedStringArray.value:type_name -> openapi.v2.StringArray + 14, // 52: openapi.v2.NonBodyParameter.header_parameter_sub_schema:type_name -> openapi.v2.HeaderParameterSubSchema + 12, // 53: openapi.v2.NonBodyParameter.form_data_parameter_sub_schema:type_name -> openapi.v2.FormDataParameterSubSchema + 45, // 54: openapi.v2.NonBodyParameter.query_parameter_sub_schema:type_name -> openapi.v2.QueryParameterSubSchema + 41, // 55: openapi.v2.NonBodyParameter.path_parameter_sub_schema:type_name -> openapi.v2.PathParameterSubSchema + 35, // 56: openapi.v2.Oauth2AccessCodeSecurity.scopes:type_name -> openapi.v2.Oauth2Scopes + 20, // 57: openapi.v2.Oauth2AccessCodeSecurity.vendor_extension:type_name -> openapi.v2.NamedAny + 35, // 58: openapi.v2.Oauth2ApplicationSecurity.scopes:type_name -> openapi.v2.Oauth2Scopes + 20, // 59: openapi.v2.Oauth2ApplicationSecurity.vendor_extension:type_name -> openapi.v2.NamedAny + 35, // 60: openapi.v2.Oauth2ImplicitSecurity.scopes:type_name -> openapi.v2.Oauth2Scopes + 20, // 61: openapi.v2.Oauth2ImplicitSecurity.vendor_extension:type_name -> openapi.v2.NamedAny + 35, // 62: openapi.v2.Oauth2PasswordSecurity.scopes:type_name -> openapi.v2.Oauth2Scopes + 20, // 63: openapi.v2.Oauth2PasswordSecurity.vendor_extension:type_name -> openapi.v2.NamedAny + 28, // 64: openapi.v2.Oauth2Scopes.additional_properties:type_name -> openapi.v2.NamedString + 10, // 65: openapi.v2.Operation.external_docs:type_name -> openapi.v2.ExternalDocs + 39, // 66: openapi.v2.Operation.parameters:type_name -> openapi.v2.ParametersItem + 49, // 67: openapi.v2.Operation.responses:type_name -> openapi.v2.Responses + 54, // 68: openapi.v2.Operation.security:type_name -> openapi.v2.SecurityRequirement + 20, // 69: openapi.v2.Operation.vendor_extension:type_name -> openapi.v2.NamedAny + 4, // 70: openapi.v2.Parameter.body_parameter:type_name -> openapi.v2.BodyParameter + 30, // 71: openapi.v2.Parameter.non_body_parameter:type_name -> openapi.v2.NonBodyParameter + 22, // 72: 
openapi.v2.ParameterDefinitions.additional_properties:type_name -> openapi.v2.NamedParameter + 37, // 73: openapi.v2.ParametersItem.parameter:type_name -> openapi.v2.Parameter + 18, // 74: openapi.v2.ParametersItem.json_reference:type_name -> openapi.v2.JsonReference + 36, // 75: openapi.v2.PathItem.get:type_name -> openapi.v2.Operation + 36, // 76: openapi.v2.PathItem.put:type_name -> openapi.v2.Operation + 36, // 77: openapi.v2.PathItem.post:type_name -> openapi.v2.Operation + 36, // 78: openapi.v2.PathItem.delete:type_name -> openapi.v2.Operation + 36, // 79: openapi.v2.PathItem.options:type_name -> openapi.v2.Operation + 36, // 80: openapi.v2.PathItem.head:type_name -> openapi.v2.Operation + 36, // 81: openapi.v2.PathItem.patch:type_name -> openapi.v2.Operation + 39, // 82: openapi.v2.PathItem.parameters:type_name -> openapi.v2.ParametersItem + 20, // 83: openapi.v2.PathItem.vendor_extension:type_name -> openapi.v2.NamedAny + 43, // 84: openapi.v2.PathParameterSubSchema.items:type_name -> openapi.v2.PrimitivesItems + 1, // 85: openapi.v2.PathParameterSubSchema.default:type_name -> openapi.v2.Any + 1, // 86: openapi.v2.PathParameterSubSchema.enum:type_name -> openapi.v2.Any + 20, // 87: openapi.v2.PathParameterSubSchema.vendor_extension:type_name -> openapi.v2.NamedAny + 20, // 88: openapi.v2.Paths.vendor_extension:type_name -> openapi.v2.NamedAny + 23, // 89: openapi.v2.Paths.path:type_name -> openapi.v2.NamedPathItem + 43, // 90: openapi.v2.PrimitivesItems.items:type_name -> openapi.v2.PrimitivesItems + 1, // 91: openapi.v2.PrimitivesItems.default:type_name -> openapi.v2.Any + 1, // 92: openapi.v2.PrimitivesItems.enum:type_name -> openapi.v2.Any + 20, // 93: openapi.v2.PrimitivesItems.vendor_extension:type_name -> openapi.v2.NamedAny + 26, // 94: openapi.v2.Properties.additional_properties:type_name -> openapi.v2.NamedSchema + 43, // 95: openapi.v2.QueryParameterSubSchema.items:type_name -> openapi.v2.PrimitivesItems + 1, // 96: openapi.v2.QueryParameterSubSchema.default:type_name -> openapi.v2.Any + 1, // 97: openapi.v2.QueryParameterSubSchema.enum:type_name -> openapi.v2.Any + 20, // 98: openapi.v2.QueryParameterSubSchema.vendor_extension:type_name -> openapi.v2.NamedAny + 51, // 99: openapi.v2.Response.schema:type_name -> openapi.v2.SchemaItem + 15, // 100: openapi.v2.Response.headers:type_name -> openapi.v2.Headers + 9, // 101: openapi.v2.Response.examples:type_name -> openapi.v2.Examples + 20, // 102: openapi.v2.Response.vendor_extension:type_name -> openapi.v2.NamedAny + 24, // 103: openapi.v2.ResponseDefinitions.additional_properties:type_name -> openapi.v2.NamedResponse + 46, // 104: openapi.v2.ResponseValue.response:type_name -> openapi.v2.Response + 18, // 105: openapi.v2.ResponseValue.json_reference:type_name -> openapi.v2.JsonReference + 25, // 106: openapi.v2.Responses.response_code:type_name -> openapi.v2.NamedResponseValue + 20, // 107: openapi.v2.Responses.vendor_extension:type_name -> openapi.v2.NamedAny + 1, // 108: openapi.v2.Schema.default:type_name -> openapi.v2.Any + 1, // 109: openapi.v2.Schema.enum:type_name -> openapi.v2.Any + 0, // 110: openapi.v2.Schema.additional_properties:type_name -> openapi.v2.AdditionalPropertiesItem + 57, // 111: openapi.v2.Schema.type:type_name -> openapi.v2.TypeItem + 17, // 112: openapi.v2.Schema.items:type_name -> openapi.v2.ItemsItem + 50, // 113: openapi.v2.Schema.all_of:type_name -> openapi.v2.Schema + 44, // 114: openapi.v2.Schema.properties:type_name -> openapi.v2.Properties + 59, // 115: openapi.v2.Schema.xml:type_name -> 
openapi.v2.Xml + 10, // 116: openapi.v2.Schema.external_docs:type_name -> openapi.v2.ExternalDocs + 1, // 117: openapi.v2.Schema.example:type_name -> openapi.v2.Any + 20, // 118: openapi.v2.Schema.vendor_extension:type_name -> openapi.v2.NamedAny + 50, // 119: openapi.v2.SchemaItem.schema:type_name -> openapi.v2.Schema + 11, // 120: openapi.v2.SchemaItem.file_schema:type_name -> openapi.v2.FileSchema + 27, // 121: openapi.v2.SecurityDefinitions.additional_properties:type_name -> openapi.v2.NamedSecurityDefinitionsItem + 3, // 122: openapi.v2.SecurityDefinitionsItem.basic_authentication_security:type_name -> openapi.v2.BasicAuthenticationSecurity + 2, // 123: openapi.v2.SecurityDefinitionsItem.api_key_security:type_name -> openapi.v2.ApiKeySecurity + 33, // 124: openapi.v2.SecurityDefinitionsItem.oauth2_implicit_security:type_name -> openapi.v2.Oauth2ImplicitSecurity + 34, // 125: openapi.v2.SecurityDefinitionsItem.oauth2_password_security:type_name -> openapi.v2.Oauth2PasswordSecurity + 32, // 126: openapi.v2.SecurityDefinitionsItem.oauth2_application_security:type_name -> openapi.v2.Oauth2ApplicationSecurity + 31, // 127: openapi.v2.SecurityDefinitionsItem.oauth2_access_code_security:type_name -> openapi.v2.Oauth2AccessCodeSecurity + 29, // 128: openapi.v2.SecurityRequirement.additional_properties:type_name -> openapi.v2.NamedStringArray + 10, // 129: openapi.v2.Tag.external_docs:type_name -> openapi.v2.ExternalDocs + 20, // 130: openapi.v2.Tag.vendor_extension:type_name -> openapi.v2.NamedAny + 20, // 131: openapi.v2.VendorExtension.additional_properties:type_name -> openapi.v2.NamedAny + 20, // 132: openapi.v2.Xml.vendor_extension:type_name -> openapi.v2.NamedAny + 133, // [133:133] is the sub-list for method output_type + 133, // [133:133] is the sub-list for method input_type + 133, // [133:133] is the sub-list for extension type_name + 133, // [133:133] is the sub-list for extension extendee + 0, // [0:133] is the sub-list for field type_name +} + +func init() { file_openapiv2_OpenAPIv2_proto_init() } +func file_openapiv2_OpenAPIv2_proto_init() { + if File_openapiv2_OpenAPIv2_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_openapiv2_OpenAPIv2_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AdditionalPropertiesItem); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Any); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ApiKeySecurity); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BasicAuthenticationSecurity); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BodyParameter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_openapiv2_OpenAPIv2_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Contact); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Default); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Definitions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Document); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Examples); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExternalDocs); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FileSchema); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FormDataParameterSubSchema); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Header); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HeaderParameterSubSchema); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Headers); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Info); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ItemsItem); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*JsonReference); i { + case 0: + return 
&v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*License); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedAny); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedHeader); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedParameter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedPathItem); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedResponseValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedSchema); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedSecurityDefinitionsItem); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedString); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedStringArray); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NonBodyParameter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Oauth2AccessCodeSecurity); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil 
+ } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Oauth2ApplicationSecurity); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Oauth2ImplicitSecurity); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Oauth2PasswordSecurity); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Oauth2Scopes); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Operation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Parameter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ParameterDefinitions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ParametersItem); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PathItem); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PathParameterSubSchema); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Paths); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PrimitivesItems); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Properties); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[45].Exporter = func(v interface{}, i int) 
interface{} { + switch v := v.(*QueryParameterSubSchema); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResponseDefinitions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResponseValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Responses); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Schema); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SchemaItem); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SecurityDefinitions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SecurityDefinitionsItem); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SecurityRequirement); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StringArray); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Tag); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TypeItem); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VendorExtension); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 
2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Xml); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*AdditionalPropertiesItem_Schema)(nil), + (*AdditionalPropertiesItem_Boolean)(nil), + } + file_openapiv2_OpenAPIv2_proto_msgTypes[30].OneofWrappers = []interface{}{ + (*NonBodyParameter_HeaderParameterSubSchema)(nil), + (*NonBodyParameter_FormDataParameterSubSchema)(nil), + (*NonBodyParameter_QueryParameterSubSchema)(nil), + (*NonBodyParameter_PathParameterSubSchema)(nil), + } + file_openapiv2_OpenAPIv2_proto_msgTypes[37].OneofWrappers = []interface{}{ + (*Parameter_BodyParameter)(nil), + (*Parameter_NonBodyParameter)(nil), + } + file_openapiv2_OpenAPIv2_proto_msgTypes[39].OneofWrappers = []interface{}{ + (*ParametersItem_Parameter)(nil), + (*ParametersItem_JsonReference)(nil), + } + file_openapiv2_OpenAPIv2_proto_msgTypes[48].OneofWrappers = []interface{}{ + (*ResponseValue_Response)(nil), + (*ResponseValue_JsonReference)(nil), + } + file_openapiv2_OpenAPIv2_proto_msgTypes[51].OneofWrappers = []interface{}{ + (*SchemaItem_Schema)(nil), + (*SchemaItem_FileSchema)(nil), + } + file_openapiv2_OpenAPIv2_proto_msgTypes[53].OneofWrappers = []interface{}{ + (*SecurityDefinitionsItem_BasicAuthenticationSecurity)(nil), + (*SecurityDefinitionsItem_ApiKeySecurity)(nil), + (*SecurityDefinitionsItem_Oauth2ImplicitSecurity)(nil), + (*SecurityDefinitionsItem_Oauth2PasswordSecurity)(nil), + (*SecurityDefinitionsItem_Oauth2ApplicationSecurity)(nil), + (*SecurityDefinitionsItem_Oauth2AccessCodeSecurity)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_openapiv2_OpenAPIv2_proto_rawDesc, + NumEnums: 0, + NumMessages: 60, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_openapiv2_OpenAPIv2_proto_goTypes, + DependencyIndexes: file_openapiv2_OpenAPIv2_proto_depIdxs, + MessageInfos: file_openapiv2_OpenAPIv2_proto_msgTypes, + }.Build() + File_openapiv2_OpenAPIv2_proto = out.File + file_openapiv2_OpenAPIv2_proto_rawDesc = nil + file_openapiv2_OpenAPIv2_proto_goTypes = nil + file_openapiv2_OpenAPIv2_proto_depIdxs = nil } diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.proto b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.proto index 557c88072..00ac1b0a0 100644 --- a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.proto +++ b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.proto @@ -1,4 +1,4 @@ -// Copyright 2017 Google Inc. All Rights Reserved. +// Copyright 2020 Google LLC. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -41,6 +41,9 @@ option java_package = "org.openapi_v2"; // the future. 'GPB' is reserved for the protocol buffer implementation itself. option objc_class_prefix = "OAS"; +// The Go package name. 
+option go_package = "openapiv2;openapi_v2"; + message AdditionalPropertiesItem { oneof oneof { Schema schema = 1; @@ -553,7 +556,7 @@ message Response { repeated NamedAny vendor_extension = 5; } -// One or more JSON representations for parameters +// One or more JSON representations for responses message ResponseDefinitions { repeated NamedResponse additional_properties = 1; } diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/README.md b/vendor/github.com/googleapis/gnostic/OpenAPIv2/README.md index 836fb32a7..5276128d3 100644 --- a/vendor/github.com/googleapis/gnostic/OpenAPIv2/README.md +++ b/vendor/github.com/googleapis/gnostic/OpenAPIv2/README.md @@ -1,16 +1,14 @@ # OpenAPI v2 Protocol Buffer Models -This directory contains a Protocol Buffer-language model -and related code for supporting OpenAPI v2. +This directory contains a Protocol Buffer-language model and related code for +supporting OpenAPI v2. -Gnostic applications and plugins can use OpenAPIv2.proto -to generate Protocol Buffer support code for their preferred languages. +Gnostic applications and plugins can use OpenAPIv2.proto to generate Protocol +Buffer support code for their preferred languages. -OpenAPIv2.go is used by Gnostic to read JSON and YAML OpenAPI -descriptions into the Protocol Buffer-based datastructures -generated from OpenAPIv2.proto. +OpenAPIv2.go is used by Gnostic to read JSON and YAML OpenAPI descriptions into +the Protocol Buffer-based datastructures generated from OpenAPIv2.proto. -OpenAPIv2.proto and OpenAPIv2.go are generated by the Gnostic -compiler generator, and OpenAPIv2.pb.go is generated by -protoc, the Protocol Buffer compiler, and protoc-gen-go, the -Protocol Buffer Go code generation plugin. +OpenAPIv2.proto and OpenAPIv2.go are generated by the Gnostic compiler +generator, and OpenAPIv2.pb.go is generated by protoc, the Protocol Buffer +compiler, and protoc-gen-go, the Protocol Buffer Go code generation plugin. diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/document.go b/vendor/github.com/googleapis/gnostic/OpenAPIv2/document.go new file mode 100644 index 000000000..ddeed5c89 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/OpenAPIv2/document.go @@ -0,0 +1,26 @@ +// Copyright 2020 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package openapi_v2 + +import "github.com/googleapis/gnostic/compiler" + +// ParseDocument reads an OpenAPI v2 description from a YAML/JSON representation. 
+func ParseDocument(b []byte) (*Document, error) { + info, err := compiler.ReadInfoFromBytes("", b) + if err != nil { + return nil, err + } + return NewDocument(info.Content[0], compiler.NewContextWithExtensions("$root", nil, nil)) +} diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/openapi-2.0.json b/vendor/github.com/googleapis/gnostic/OpenAPIv2/openapi-2.0.json index 2815a26ea..afa12b79b 100644 --- a/vendor/github.com/googleapis/gnostic/OpenAPIv2/openapi-2.0.json +++ b/vendor/github.com/googleapis/gnostic/OpenAPIv2/openapi-2.0.json @@ -203,7 +203,7 @@ "additionalProperties": { "$ref": "#/definitions/response" }, - "description": "One or more JSON representations for parameters" + "description": "One or more JSON representations for responses" }, "externalDocs": { "type": "object", @@ -1607,4 +1607,4 @@ } } } -} +} \ No newline at end of file diff --git a/vendor/github.com/googleapis/gnostic/compiler/README.md b/vendor/github.com/googleapis/gnostic/compiler/README.md index 848b16c69..ee9783d23 100644 --- a/vendor/github.com/googleapis/gnostic/compiler/README.md +++ b/vendor/github.com/googleapis/gnostic/compiler/README.md @@ -1,3 +1,4 @@ # Compiler support code -This directory contains compiler support code used by Gnostic and Gnostic extensions. \ No newline at end of file +This directory contains compiler support code used by Gnostic and Gnostic +extensions. diff --git a/vendor/github.com/googleapis/gnostic/compiler/context.go b/vendor/github.com/googleapis/gnostic/compiler/context.go index a64c1b75d..1250a6c59 100644 --- a/vendor/github.com/googleapis/gnostic/compiler/context.go +++ b/vendor/github.com/googleapis/gnostic/compiler/context.go @@ -1,4 +1,4 @@ -// Copyright 2017 Google Inc. All Rights Reserved. +// Copyright 2017 Google LLC. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/googleapis/gnostic/compiler/error.go b/vendor/github.com/googleapis/gnostic/compiler/error.go index d8672c100..7db23997b 100644 --- a/vendor/github.com/googleapis/gnostic/compiler/error.go +++ b/vendor/github.com/googleapis/gnostic/compiler/error.go @@ -1,4 +1,4 @@ -// Copyright 2017 Google Inc. All Rights Reserved. +// Copyright 2017 Google LLC. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/googleapis/gnostic/compiler/extension-handler.go b/vendor/github.com/googleapis/gnostic/compiler/extension-handler.go deleted file mode 100644 index 1f85b650e..000000000 --- a/vendor/github.com/googleapis/gnostic/compiler/extension-handler.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright 2017 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package compiler - -import ( - "bytes" - "fmt" - "os/exec" - - "strings" - - "errors" - - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes/any" - ext_plugin "github.com/googleapis/gnostic/extensions" - yaml "gopkg.in/yaml.v2" -) - -// ExtensionHandler describes a binary that is called by the compiler to handle specification extensions. -type ExtensionHandler struct { - Name string -} - -// HandleExtension calls a binary extension handler. -func HandleExtension(context *Context, in interface{}, extensionName string) (bool, *any.Any, error) { - handled := false - var errFromPlugin error - var outFromPlugin *any.Any - - if context != nil && context.ExtensionHandlers != nil && len(*(context.ExtensionHandlers)) != 0 { - for _, customAnyProtoGenerator := range *(context.ExtensionHandlers) { - outFromPlugin, errFromPlugin = customAnyProtoGenerator.handle(in, extensionName) - if outFromPlugin == nil { - continue - } else { - handled = true - break - } - } - } - return handled, outFromPlugin, errFromPlugin -} - -func (extensionHandlers *ExtensionHandler) handle(in interface{}, extensionName string) (*any.Any, error) { - if extensionHandlers.Name != "" { - binary, _ := yaml.Marshal(in) - - request := &ext_plugin.ExtensionHandlerRequest{} - - version := &ext_plugin.Version{} - version.Major = 0 - version.Minor = 1 - version.Patch = 0 - request.CompilerVersion = version - - request.Wrapper = &ext_plugin.Wrapper{} - - request.Wrapper.Version = "v2" - request.Wrapper.Yaml = string(binary) - request.Wrapper.ExtensionName = extensionName - - requestBytes, _ := proto.Marshal(request) - cmd := exec.Command(extensionHandlers.Name) - cmd.Stdin = bytes.NewReader(requestBytes) - output, err := cmd.Output() - - if err != nil { - fmt.Printf("Error: %+v\n", err) - return nil, err - } - response := &ext_plugin.ExtensionHandlerResponse{} - err = proto.Unmarshal(output, response) - if err != nil { - fmt.Printf("Error: %+v\n", err) - fmt.Printf("%s\n", string(output)) - return nil, err - } - if !response.Handled { - return nil, nil - } - if len(response.Error) != 0 { - message := fmt.Sprintf("Errors when parsing: %+v for field %s by vendor extension handler %s. Details %+v", in, extensionName, extensionHandlers.Name, strings.Join(response.Error, ",")) - return nil, errors.New(message) - } - return response.Value, nil - } - return nil, nil -} diff --git a/vendor/github.com/googleapis/gnostic/compiler/extensions.go b/vendor/github.com/googleapis/gnostic/compiler/extensions.go new file mode 100644 index 000000000..20848a0a1 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/compiler/extensions.go @@ -0,0 +1,85 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package compiler + +import ( + "bytes" + "fmt" + "os/exec" + "strings" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes/any" + extensions "github.com/googleapis/gnostic/extensions" + yaml "gopkg.in/yaml.v3" +) + +// ExtensionHandler describes a binary that is called by the compiler to handle specification extensions. +type ExtensionHandler struct { + Name string +} + +// CallExtension calls a binary extension handler. +func CallExtension(context *Context, in *yaml.Node, extensionName string) (handled bool, response *any.Any, err error) { + if context == nil || context.ExtensionHandlers == nil { + return false, nil, nil + } + handled = false + for _, handler := range *(context.ExtensionHandlers) { + response, err = handler.handle(in, extensionName) + if response == nil { + continue + } else { + handled = true + break + } + } + return handled, response, err +} + +func (extensionHandlers *ExtensionHandler) handle(in *yaml.Node, extensionName string) (*any.Any, error) { + if extensionHandlers.Name != "" { + yamlData, _ := yaml.Marshal(in) + request := &extensions.ExtensionHandlerRequest{ + CompilerVersion: &extensions.Version{ + Major: 0, + Minor: 1, + Patch: 0, + }, + Wrapper: &extensions.Wrapper{ + Version: "unknown", // TODO: set this to the type/version of spec being parsed. + Yaml: string(yamlData), + ExtensionName: extensionName, + }, + } + requestBytes, _ := proto.Marshal(request) + cmd := exec.Command(extensionHandlers.Name) + cmd.Stdin = bytes.NewReader(requestBytes) + output, err := cmd.Output() + if err != nil { + return nil, err + } + response := &extensions.ExtensionHandlerResponse{} + err = proto.Unmarshal(output, response) + if err != nil || !response.Handled { + return nil, err + } + if len(response.Errors) != 0 { + return nil, fmt.Errorf("Errors when parsing: %+v for field %s by vendor extension handler %s. Details %+v", in, extensionName, extensionHandlers.Name, strings.Join(response.Errors, ",")) + } + return response.Value, nil + } + return nil, nil +} diff --git a/vendor/github.com/googleapis/gnostic/compiler/helpers.go b/vendor/github.com/googleapis/gnostic/compiler/helpers.go index 76df635ff..a580f40ac 100644 --- a/vendor/github.com/googleapis/gnostic/compiler/helpers.go +++ b/vendor/github.com/googleapis/gnostic/compiler/helpers.go @@ -1,4 +1,4 @@ -// Copyright 2017 Google Inc. All Rights Reserved. +// Copyright 2017 Google LLC. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -16,56 +16,63 @@ package compiler import ( "fmt" - "gopkg.in/yaml.v2" "regexp" "sort" "strconv" + + "github.com/googleapis/gnostic/jsonschema" + "gopkg.in/yaml.v3" ) // compiler helper functions, usually called from generated code -// UnpackMap gets a yaml.MapSlice if possible. -func UnpackMap(in interface{}) (yaml.MapSlice, bool) { - m, ok := in.(yaml.MapSlice) - if ok { - return m, true - } - // do we have an empty array? - a, ok := in.([]interface{}) - if ok && len(a) == 0 { - // if so, return an empty map - return yaml.MapSlice{}, true +// UnpackMap gets a *yaml.Node if possible. +func UnpackMap(in *yaml.Node) (*yaml.Node, bool) { + if in == nil { + return nil, false } - return nil, false + return in, true } -// SortedKeysForMap returns the sorted keys of a yaml.MapSlice. -func SortedKeysForMap(m yaml.MapSlice) []string { +// SortedKeysForMap returns the sorted keys of a yamlv2.MapSlice. 
+func SortedKeysForMap(m *yaml.Node) []string { keys := make([]string, 0) - for _, item := range m { - keys = append(keys, item.Key.(string)) + if m.Kind == yaml.MappingNode { + for i := 0; i < len(m.Content); i += 2 { + keys = append(keys, m.Content[i].Value) + } } sort.Strings(keys) return keys } -// MapHasKey returns true if a yaml.MapSlice contains a specified key. -func MapHasKey(m yaml.MapSlice, key string) bool { - for _, item := range m { - itemKey, ok := item.Key.(string) - if ok && key == itemKey { - return true +// MapHasKey returns true if a yamlv2.MapSlice contains a specified key. +func MapHasKey(m *yaml.Node, key string) bool { + if m == nil { + return false + } + if m.Kind == yaml.MappingNode { + for i := 0; i < len(m.Content); i += 2 { + itemKey := m.Content[i].Value + if key == itemKey { + return true + } } } return false } // MapValueForKey gets the value of a map value for a specified key. -func MapValueForKey(m yaml.MapSlice, key string) interface{} { - for _, item := range m { - itemKey, ok := item.Key.(string) - if ok && key == itemKey { - return item.Value +func MapValueForKey(m *yaml.Node, key string) *yaml.Node { + if m == nil { + return nil + } + if m.Kind == yaml.MappingNode { + for i := 0; i < len(m.Content); i += 2 { + itemKey := m.Content[i].Value + if key == itemKey { + return m.Content[i+1] + } } } return nil @@ -83,8 +90,116 @@ func ConvertInterfaceArrayToStringArray(interfaceArray []interface{}) []string { return stringArray } +// SequenceNodeForNode returns a node if it is a SequenceNode. +func SequenceNodeForNode(node *yaml.Node) (*yaml.Node, bool) { + if node.Kind != yaml.SequenceNode { + return nil, false + } + return node, true +} + +// BoolForScalarNode returns the bool value of a node. +func BoolForScalarNode(node *yaml.Node) (bool, bool) { + if node == nil { + return false, false + } + if node.Kind == yaml.DocumentNode { + return BoolForScalarNode(node.Content[0]) + } + if node.Kind != yaml.ScalarNode { + return false, false + } + if node.Tag != "!!bool" { + return false, false + } + v, err := strconv.ParseBool(node.Value) + if err != nil { + return false, false + } + return v, true +} + +// IntForScalarNode returns the integer value of a node. +func IntForScalarNode(node *yaml.Node) (int64, bool) { + if node == nil { + return 0, false + } + if node.Kind == yaml.DocumentNode { + return IntForScalarNode(node.Content[0]) + } + if node.Kind != yaml.ScalarNode { + return 0, false + } + if node.Tag != "!!int" { + return 0, false + } + v, err := strconv.ParseInt(node.Value, 10, 64) + if err != nil { + return 0, false + } + return v, true +} + +// FloatForScalarNode returns the float value of a node. +func FloatForScalarNode(node *yaml.Node) (float64, bool) { + if node == nil { + return 0.0, false + } + if node.Kind == yaml.DocumentNode { + return FloatForScalarNode(node.Content[0]) + } + if node.Kind != yaml.ScalarNode { + return 0.0, false + } + if (node.Tag != "!!int") && (node.Tag != "!!float") { + return 0.0, false + } + v, err := strconv.ParseFloat(node.Value, 64) + if err != nil { + return 0.0, false + } + return v, true +} + +// StringForScalarNode returns the string value of a node. 
+func StringForScalarNode(node *yaml.Node) (string, bool) { + if node == nil { + return "", false + } + if node.Kind == yaml.DocumentNode { + return StringForScalarNode(node.Content[0]) + } + switch node.Kind { + case yaml.ScalarNode: + switch node.Tag { + case "!!int": + return node.Value, true + case "!!str": + return node.Value, true + case "!!null": + return "", true + default: + return "", false + } + default: + return "", false + } +} + +// StringArrayForSequenceNode converts a sequence node to an array of strings, if possible. +func StringArrayForSequenceNode(node *yaml.Node) []string { + stringArray := make([]string, 0) + for _, item := range node.Content { + v, ok := StringForScalarNode(item) + if ok { + stringArray = append(stringArray, v) + } + } + return stringArray +} + // MissingKeysInMap identifies which keys from a list of required keys are not in a map. -func MissingKeysInMap(m yaml.MapSlice, requiredKeys []string) []string { +func MissingKeysInMap(m *yaml.Node, requiredKeys []string) []string { missingKeys := make([]string, 0) for _, k := range requiredKeys { if !MapHasKey(m, k) { @@ -95,64 +210,100 @@ func MissingKeysInMap(m yaml.MapSlice, requiredKeys []string) []string { } // InvalidKeysInMap returns keys in a map that don't match a list of allowed keys and patterns. -func InvalidKeysInMap(m yaml.MapSlice, allowedKeys []string, allowedPatterns []*regexp.Regexp) []string { +func InvalidKeysInMap(m *yaml.Node, allowedKeys []string, allowedPatterns []*regexp.Regexp) []string { invalidKeys := make([]string, 0) - for _, item := range m { - itemKey, ok := item.Key.(string) - if ok { - key := itemKey - found := false - // does the key match an allowed key? - for _, allowedKey := range allowedKeys { - if key == allowedKey { + if m == nil || m.Kind != yaml.MappingNode { + return invalidKeys + } + for i := 0; i < len(m.Content); i += 2 { + key := m.Content[i].Value + found := false + // does the key match an allowed key? + for _, allowedKey := range allowedKeys { + if key == allowedKey { + found = true + break + } + } + if !found { + // does the key match an allowed pattern? + for _, allowedPattern := range allowedPatterns { + if allowedPattern.MatchString(key) { found = true break } } if !found { - // does the key match an allowed pattern? - for _, allowedPattern := range allowedPatterns { - if allowedPattern.MatchString(key) { - found = true - break - } - } - if !found { - invalidKeys = append(invalidKeys, key) - } + invalidKeys = append(invalidKeys, key) } } } return invalidKeys } -// DescribeMap describes a map (for debugging purposes). -func DescribeMap(in interface{}, indent string) string { - description := "" - m, ok := in.(map[string]interface{}) - if ok { - keys := make([]string, 0) - for k := range m { - keys = append(keys, k) - } - sort.Strings(keys) - for _, k := range keys { - v := m[k] - description += fmt.Sprintf("%s%s:\n", indent, k) - description += DescribeMap(v, indent+" ") - } - return description +// NewMappingNode creates a new Mapping node. +func NewMappingNode() *yaml.Node { + return &yaml.Node{ + Kind: yaml.MappingNode, + Content: make([]*yaml.Node, 0), } - a, ok := in.([]interface{}) - if ok { - for i, v := range a { - description += fmt.Sprintf("%s%d:\n", indent, i) - description += DescribeMap(v, indent+" ") - } - return description +} + +// NewSequenceNode creates a new Sequence node. 
+func NewSequenceNode() *yaml.Node { + node := &yaml.Node{ + Kind: yaml.SequenceNode, + Content: make([]*yaml.Node, 0), + } + return node +} + +// NewScalarNodeForString creates a new node to hold a string. +func NewScalarNodeForString(s string) *yaml.Node { + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: "!!str", + Value: s, + } +} + +// NewSequenceNodeForStringArray creates a new node to hold an array of strings. +func NewSequenceNodeForStringArray(strings []string) *yaml.Node { + node := &yaml.Node{ + Kind: yaml.SequenceNode, + Content: make([]*yaml.Node, 0), + } + for _, s := range strings { + node.Content = append(node.Content, NewScalarNodeForString(s)) + } + return node +} + +// NewScalarNodeForBool creates a new node to hold a bool. +func NewScalarNodeForBool(b bool) *yaml.Node { + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: "!!bool", + Value: fmt.Sprintf("%t", b), + } +} + +// NewScalarNodeForFloat creates a new node to hold a float. +func NewScalarNodeForFloat(f float64) *yaml.Node { + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: "!!float", + Value: fmt.Sprintf("%g", f), + } +} + +// NewScalarNodeForInt creates a new node to hold an integer. +func NewScalarNodeForInt(i int64) *yaml.Node { + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: "!!int", + Value: fmt.Sprintf("%d", i), } - description += fmt.Sprintf("%s%+v\n", indent, in) - return description } // PluralProperties returns the string "properties" pluralized. @@ -195,3 +346,40 @@ func StringValue(item interface{}) (value string, ok bool) { } return "", false } + +// Description returns a human-readable represention of an item. +func Description(item interface{}) string { + value, ok := item.(*yaml.Node) + if ok { + return jsonschema.Render(value) + } + return fmt.Sprintf("%+v", item) +} + +// Display returns a description of a node for use in error messages. +func Display(node *yaml.Node) string { + switch node.Kind { + case yaml.ScalarNode: + switch node.Tag { + case "!!str": + return fmt.Sprintf("%s (string)", node.Value) + } + } + return fmt.Sprintf("%+v (%T)", node, node) +} + +// Marshal creates a yaml version of a structure in our preferred style +func Marshal(in *yaml.Node) []byte { + clearStyle(in) + //bytes, _ := yaml.Marshal(&yaml.Node{Kind: yaml.DocumentNode, Content: []*yaml.Node{in}}) + bytes, _ := yaml.Marshal(in) + + return bytes +} + +func clearStyle(node *yaml.Node) { + node.Style = 0 + for _, c := range node.Content { + clearStyle(c) + } +} diff --git a/vendor/github.com/googleapis/gnostic/compiler/main.go b/vendor/github.com/googleapis/gnostic/compiler/main.go index 9713a21cc..ce9fcc456 100644 --- a/vendor/github.com/googleapis/gnostic/compiler/main.go +++ b/vendor/github.com/googleapis/gnostic/compiler/main.go @@ -1,4 +1,4 @@ -// Copyright 2017 Google Inc. All Rights Reserved. +// Copyright 2017 Google LLC. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/googleapis/gnostic/compiler/reader.go b/vendor/github.com/googleapis/gnostic/compiler/reader.go index 25affd063..be0e8b40c 100644 --- a/vendor/github.com/googleapis/gnostic/compiler/reader.go +++ b/vendor/github.com/googleapis/gnostic/compiler/reader.go @@ -1,4 +1,4 @@ -// Copyright 2017 Google Inc. All Rights Reserved. +// Copyright 2017 Google LLC. All Rights Reserved. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,6 @@ package compiler import ( - "errors" "fmt" "io/ioutil" "log" @@ -23,18 +22,30 @@ import ( "net/url" "path/filepath" "strings" + "sync" - yaml "gopkg.in/yaml.v2" + yaml "gopkg.in/yaml.v3" ) +var verboseReader = false + var fileCache map[string][]byte -var infoCache map[string]interface{} -var count int64 +var infoCache map[string]*yaml.Node -var verboseReader = false var fileCacheEnable = true var infoCacheEnable = true +// These locks are used to synchronize accesses to the fileCache and infoCache +// maps (above). They are global state and can throw thread-related errors +// when modified from separate goroutines. The general strategy is to protect +// all public functions in this file with mutex Lock() calls. As a result, to +// avoid deadlock, these public functions should not call other public +// functions, so some public functions have private equivalents. +// In the future, we might consider replacing the maps with sync.Map and +// eliminating these mutexes. +var fileCacheMutex sync.Mutex +var infoCacheMutex sync.Mutex + func initializeFileCache() { if fileCache == nil { fileCache = make(map[string][]byte, 0) @@ -43,19 +54,42 @@ func initializeFileCache() { func initializeInfoCache() { if infoCache == nil { - infoCache = make(map[string]interface{}, 0) + infoCache = make(map[string]*yaml.Node, 0) } } +// EnableFileCache turns on file caching. +func EnableFileCache() { + fileCacheMutex.Lock() + defer fileCacheMutex.Unlock() + fileCacheEnable = true +} + +// EnableInfoCache turns on parsed info caching. +func EnableInfoCache() { + infoCacheMutex.Lock() + defer infoCacheMutex.Unlock() + infoCacheEnable = true +} + +// DisableFileCache turns off file caching. func DisableFileCache() { + fileCacheMutex.Lock() + defer fileCacheMutex.Unlock() fileCacheEnable = false } +// DisableInfoCache turns off parsed info caching. func DisableInfoCache() { + infoCacheMutex.Lock() + defer infoCacheMutex.Unlock() infoCacheEnable = false } +// RemoveFromFileCache removes an entry from the file cache. func RemoveFromFileCache(fileurl string) { + fileCacheMutex.Lock() + defer fileCacheMutex.Unlock() if !fileCacheEnable { return } @@ -63,7 +97,10 @@ func RemoveFromFileCache(fileurl string) { delete(fileCache, fileurl) } +// RemoveFromInfoCache removes an entry from the info cache. func RemoveFromInfoCache(filename string) { + infoCacheMutex.Lock() + defer infoCacheMutex.Unlock() if !infoCacheEnable { return } @@ -71,19 +108,44 @@ func RemoveFromInfoCache(filename string) { delete(infoCache, filename) } -func GetInfoCache() map[string]interface{} { +// GetInfoCache returns the info cache map. +func GetInfoCache() map[string]*yaml.Node { + infoCacheMutex.Lock() + defer infoCacheMutex.Unlock() if infoCache == nil { initializeInfoCache() } return infoCache } +// ClearFileCache clears the file cache. +func ClearFileCache() { + fileCacheMutex.Lock() + defer fileCacheMutex.Unlock() + fileCache = make(map[string][]byte, 0) +} + +// ClearInfoCache clears the info cache. func ClearInfoCache() { - infoCache = make(map[string]interface{}) + infoCacheMutex.Lock() + defer infoCacheMutex.Unlock() + infoCache = make(map[string]*yaml.Node) +} + +// ClearCaches clears all caches. +func ClearCaches() { + ClearFileCache() + ClearInfoCache() } // FetchFile gets a specified file from the local filesystem or a remote location. 
func FetchFile(fileurl string) ([]byte, error) { + fileCacheMutex.Lock() + defer fileCacheMutex.Unlock() + return fetchFile(fileurl) +} + +func fetchFile(fileurl string) ([]byte, error) { var bytes []byte initializeFileCache() if fileCacheEnable { @@ -104,7 +166,7 @@ func FetchFile(fileurl string) ([]byte, error) { } defer response.Body.Close() if response.StatusCode != 200 { - return nil, errors.New(fmt.Sprintf("Error downloading %s: %s", fileurl, response.Status)) + return nil, fmt.Errorf("Error downloading %s: %s", fileurl, response.Status) } bytes, err = ioutil.ReadAll(response.Body) if fileCacheEnable && err == nil { @@ -115,11 +177,17 @@ func FetchFile(fileurl string) ([]byte, error) { // ReadBytesForFile reads the bytes of a file. func ReadBytesForFile(filename string) ([]byte, error) { + fileCacheMutex.Lock() + defer fileCacheMutex.Unlock() + return readBytesForFile(filename) +} + +func readBytesForFile(filename string) ([]byte, error) { // is the filename a url? fileurl, _ := url.Parse(filename) if fileurl.Scheme != "" { // yes, fetch it - bytes, err := FetchFile(filename) + bytes, err := fetchFile(filename) if err != nil { return nil, err } @@ -133,8 +201,14 @@ func ReadBytesForFile(filename string) ([]byte, error) { return bytes, nil } -// ReadInfoFromBytes unmarshals a file as a yaml.MapSlice. -func ReadInfoFromBytes(filename string, bytes []byte) (interface{}, error) { +// ReadInfoFromBytes unmarshals a file as a *yaml.Node. +func ReadInfoFromBytes(filename string, bytes []byte) (*yaml.Node, error) { + infoCacheMutex.Lock() + defer infoCacheMutex.Unlock() + return readInfoFromBytes(filename, bytes) +} + +func readInfoFromBytes(filename string, bytes []byte) (*yaml.Node, error) { initializeInfoCache() if infoCacheEnable { cachedInfo, ok := infoCache[filename] @@ -148,19 +222,23 @@ func ReadInfoFromBytes(filename string, bytes []byte) (interface{}, error) { log.Printf("Reading info for file %s", filename) } } - var info yaml.MapSlice + var info yaml.Node err := yaml.Unmarshal(bytes, &info) if err != nil { return nil, err } if infoCacheEnable && len(filename) > 0 { - infoCache[filename] = info + infoCache[filename] = &info } - return info, nil + return &info, nil } // ReadInfoForRef reads a file and return the fragment needed to resolve a $ref. 
-func ReadInfoForRef(basefile string, ref string) (interface{}, error) { +func ReadInfoForRef(basefile string, ref string) (*yaml.Node, error) { + fileCacheMutex.Lock() + defer fileCacheMutex.Unlock() + infoCacheMutex.Lock() + defer infoCacheMutex.Unlock() initializeInfoCache() if infoCacheEnable { info, ok := infoCache[ref] @@ -174,7 +252,6 @@ func ReadInfoForRef(basefile string, ref string) (interface{}, error) { log.Printf("Reading info for ref %s#%s", basefile, ref) } } - count = count + 1 basedir, _ := filepath.Split(basefile) parts := strings.Split(ref, "#") var filename string @@ -187,24 +264,30 @@ func ReadInfoForRef(basefile string, ref string) (interface{}, error) { } else { filename = basefile } - bytes, err := ReadBytesForFile(filename) + bytes, err := readBytesForFile(filename) if err != nil { return nil, err } - info, err := ReadInfoFromBytes(filename, bytes) + info, err := readInfoFromBytes(filename, bytes) + if info != nil && info.Kind == yaml.DocumentNode { + info = info.Content[0] + } if err != nil { log.Printf("File error: %v\n", err) } else { + if info == nil { + return nil, NewError(nil, fmt.Sprintf("could not resolve %s", ref)) + } if len(parts) > 1 { path := strings.Split(parts[1], "/") for i, key := range path { if i > 0 { - m, ok := info.(yaml.MapSlice) - if ok { + m := info + if true { found := false - for _, section := range m { - if section.Key == key { - info = section.Value + for i := 0; i < len(m.Content); i += 2 { + if m.Content[i].Value == key { + info = m.Content[i+1] found = true } } diff --git a/vendor/github.com/googleapis/gnostic/extensions/README.md b/vendor/github.com/googleapis/gnostic/extensions/README.md index ff1c2eb1e..4b5d63e58 100644 --- a/vendor/github.com/googleapis/gnostic/extensions/README.md +++ b/vendor/github.com/googleapis/gnostic/extensions/README.md @@ -1,5 +1,13 @@ # Extensions -This directory contains support code for building Gnostic extensions and associated examples. +**Extension Support is experimental.** -Extensions are used to compile vendor or specification extensions into protocol buffer structures. +This directory contains support code for building Gnostic extensio handlers and +associated examples. + +Extension handlers can be used to compile vendor or specification extensions +into protocol buffer structures. + +Like plugins, extension handlers are built as separate executables. Extension +bodies are written to extension handlers as serialized +ExtensionHandlerRequests. diff --git a/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go b/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go index 432dc06e6..4198fa17e 100644 --- a/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go +++ b/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go @@ -1,148 +1,186 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + // Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.23.0 +// protoc v3.12.3 // source: extensions/extension.proto -package openapiextension_v1 +package gnostic_extension_v1 import ( - fmt "fmt" proto "github.com/golang/protobuf/proto" any "github.com/golang/protobuf/ptypes/any" - math "math" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 -// The version number of OpenAPI compiler. +// The version number of Gnostic. type Version struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + Major int32 `protobuf:"varint,1,opt,name=major,proto3" json:"major,omitempty"` Minor int32 `protobuf:"varint,2,opt,name=minor,proto3" json:"minor,omitempty"` Patch int32 `protobuf:"varint,3,opt,name=patch,proto3" json:"patch,omitempty"` // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should // be empty for mainline stable releases. 
- Suffix string `protobuf:"bytes,4,opt,name=suffix,proto3" json:"suffix,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Suffix string `protobuf:"bytes,4,opt,name=suffix,proto3" json:"suffix,omitempty"` } -func (m *Version) Reset() { *m = Version{} } -func (m *Version) String() string { return proto.CompactTextString(m) } -func (*Version) ProtoMessage() {} -func (*Version) Descriptor() ([]byte, []int) { - return fileDescriptor_661e47e790f76671, []int{0} +func (x *Version) Reset() { + *x = Version{} + if protoimpl.UnsafeEnabled { + mi := &file_extensions_extension_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Version) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Version.Unmarshal(m, b) -} -func (m *Version) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Version.Marshal(b, m, deterministic) -} -func (m *Version) XXX_Merge(src proto.Message) { - xxx_messageInfo_Version.Merge(m, src) +func (x *Version) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Version) XXX_Size() int { - return xxx_messageInfo_Version.Size(m) -} -func (m *Version) XXX_DiscardUnknown() { - xxx_messageInfo_Version.DiscardUnknown(m) + +func (*Version) ProtoMessage() {} + +func (x *Version) ProtoReflect() protoreflect.Message { + mi := &file_extensions_extension_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Version proto.InternalMessageInfo +// Deprecated: Use Version.ProtoReflect.Descriptor instead. +func (*Version) Descriptor() ([]byte, []int) { + return file_extensions_extension_proto_rawDescGZIP(), []int{0} +} -func (m *Version) GetMajor() int32 { - if m != nil { - return m.Major +func (x *Version) GetMajor() int32 { + if x != nil { + return x.Major } return 0 } -func (m *Version) GetMinor() int32 { - if m != nil { - return m.Minor +func (x *Version) GetMinor() int32 { + if x != nil { + return x.Minor } return 0 } -func (m *Version) GetPatch() int32 { - if m != nil { - return m.Patch +func (x *Version) GetPatch() int32 { + if x != nil { + return x.Patch } return 0 } -func (m *Version) GetSuffix() string { - if m != nil { - return m.Suffix +func (x *Version) GetSuffix() string { + if x != nil { + return x.Suffix } return "" } // An encoded Request is written to the ExtensionHandler's stdin. type ExtensionHandlerRequest struct { - // The OpenAPI descriptions that were explicitly listed on the command line. - // The specifications will appear in the order they are specified to gnostic. + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The extension to process. Wrapper *Wrapper `protobuf:"bytes,1,opt,name=wrapper,proto3" json:"wrapper,omitempty"` - // The version number of openapi compiler. - CompilerVersion *Version `protobuf:"bytes,3,opt,name=compiler_version,json=compilerVersion,proto3" json:"compiler_version,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + // The version number of Gnostic. 
+ CompilerVersion *Version `protobuf:"bytes,2,opt,name=compiler_version,json=compilerVersion,proto3" json:"compiler_version,omitempty"` } -func (m *ExtensionHandlerRequest) Reset() { *m = ExtensionHandlerRequest{} } -func (m *ExtensionHandlerRequest) String() string { return proto.CompactTextString(m) } -func (*ExtensionHandlerRequest) ProtoMessage() {} -func (*ExtensionHandlerRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_661e47e790f76671, []int{1} +func (x *ExtensionHandlerRequest) Reset() { + *x = ExtensionHandlerRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_extensions_extension_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *ExtensionHandlerRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ExtensionHandlerRequest.Unmarshal(m, b) -} -func (m *ExtensionHandlerRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ExtensionHandlerRequest.Marshal(b, m, deterministic) -} -func (m *ExtensionHandlerRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExtensionHandlerRequest.Merge(m, src) -} -func (m *ExtensionHandlerRequest) XXX_Size() int { - return xxx_messageInfo_ExtensionHandlerRequest.Size(m) +func (x *ExtensionHandlerRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ExtensionHandlerRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ExtensionHandlerRequest.DiscardUnknown(m) + +func (*ExtensionHandlerRequest) ProtoMessage() {} + +func (x *ExtensionHandlerRequest) ProtoReflect() protoreflect.Message { + mi := &file_extensions_extension_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_ExtensionHandlerRequest proto.InternalMessageInfo +// Deprecated: Use ExtensionHandlerRequest.ProtoReflect.Descriptor instead. +func (*ExtensionHandlerRequest) Descriptor() ([]byte, []int) { + return file_extensions_extension_proto_rawDescGZIP(), []int{1} +} -func (m *ExtensionHandlerRequest) GetWrapper() *Wrapper { - if m != nil { - return m.Wrapper +func (x *ExtensionHandlerRequest) GetWrapper() *Wrapper { + if x != nil { + return x.Wrapper } return nil } -func (m *ExtensionHandlerRequest) GetCompilerVersion() *Version { - if m != nil { - return m.CompilerVersion +func (x *ExtensionHandlerRequest) GetCompilerVersion() *Version { + if x != nil { + return x.CompilerVersion } return nil } // The extensions writes an encoded ExtensionHandlerResponse to stdout. type ExtensionHandlerResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // true if the extension is handled by the extension handler; false otherwise Handled bool `protobuf:"varint,1,opt,name=handled,proto3" json:"handled,omitempty"` - // Error message. If non-empty, the extension handling failed. + // Error message(s). If non-empty, the extension handling failed. // The extension handler process should exit with status code zero // even if it reports an error in this way. // @@ -151,150 +189,277 @@ type ExtensionHandlerResponse struct { // itself -- such as the input Document being unparseable -- should be // reported by writing a message to stderr and exiting with a non-zero // status code. 
- Error []string `protobuf:"bytes,2,rep,name=error,proto3" json:"error,omitempty"` + Errors []string `protobuf:"bytes,2,rep,name=errors,proto3" json:"errors,omitempty"` // text output - Value *any.Any `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Value *any.Any `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` } -func (m *ExtensionHandlerResponse) Reset() { *m = ExtensionHandlerResponse{} } -func (m *ExtensionHandlerResponse) String() string { return proto.CompactTextString(m) } -func (*ExtensionHandlerResponse) ProtoMessage() {} -func (*ExtensionHandlerResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_661e47e790f76671, []int{2} +func (x *ExtensionHandlerResponse) Reset() { + *x = ExtensionHandlerResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_extensions_extension_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *ExtensionHandlerResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ExtensionHandlerResponse.Unmarshal(m, b) -} -func (m *ExtensionHandlerResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ExtensionHandlerResponse.Marshal(b, m, deterministic) -} -func (m *ExtensionHandlerResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExtensionHandlerResponse.Merge(m, src) +func (x *ExtensionHandlerResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ExtensionHandlerResponse) XXX_Size() int { - return xxx_messageInfo_ExtensionHandlerResponse.Size(m) -} -func (m *ExtensionHandlerResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ExtensionHandlerResponse.DiscardUnknown(m) + +func (*ExtensionHandlerResponse) ProtoMessage() {} + +func (x *ExtensionHandlerResponse) ProtoReflect() protoreflect.Message { + mi := &file_extensions_extension_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_ExtensionHandlerResponse proto.InternalMessageInfo +// Deprecated: Use ExtensionHandlerResponse.ProtoReflect.Descriptor instead. +func (*ExtensionHandlerResponse) Descriptor() ([]byte, []int) { + return file_extensions_extension_proto_rawDescGZIP(), []int{2} +} -func (m *ExtensionHandlerResponse) GetHandled() bool { - if m != nil { - return m.Handled +func (x *ExtensionHandlerResponse) GetHandled() bool { + if x != nil { + return x.Handled } return false } -func (m *ExtensionHandlerResponse) GetError() []string { - if m != nil { - return m.Error +func (x *ExtensionHandlerResponse) GetErrors() []string { + if x != nil { + return x.Errors } return nil } -func (m *ExtensionHandlerResponse) GetValue() *any.Any { - if m != nil { - return m.Value +func (x *ExtensionHandlerResponse) GetValue() *any.Any { + if x != nil { + return x.Value } return nil } type Wrapper struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // version of the OpenAPI specification in which this extension was written. Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` - // Name of the extension + // Name of the extension. 
ExtensionName string `protobuf:"bytes,2,opt,name=extension_name,json=extensionName,proto3" json:"extension_name,omitempty"` - // Must be a valid yaml for the proto - Yaml string `protobuf:"bytes,3,opt,name=yaml,proto3" json:"yaml,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + // YAML-formatted extension value. + Yaml string `protobuf:"bytes,3,opt,name=yaml,proto3" json:"yaml,omitempty"` } -func (m *Wrapper) Reset() { *m = Wrapper{} } -func (m *Wrapper) String() string { return proto.CompactTextString(m) } -func (*Wrapper) ProtoMessage() {} -func (*Wrapper) Descriptor() ([]byte, []int) { - return fileDescriptor_661e47e790f76671, []int{3} +func (x *Wrapper) Reset() { + *x = Wrapper{} + if protoimpl.UnsafeEnabled { + mi := &file_extensions_extension_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Wrapper) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Wrapper.Unmarshal(m, b) +func (x *Wrapper) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Wrapper) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Wrapper.Marshal(b, m, deterministic) -} -func (m *Wrapper) XXX_Merge(src proto.Message) { - xxx_messageInfo_Wrapper.Merge(m, src) -} -func (m *Wrapper) XXX_Size() int { - return xxx_messageInfo_Wrapper.Size(m) -} -func (m *Wrapper) XXX_DiscardUnknown() { - xxx_messageInfo_Wrapper.DiscardUnknown(m) + +func (*Wrapper) ProtoMessage() {} + +func (x *Wrapper) ProtoReflect() protoreflect.Message { + mi := &file_extensions_extension_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Wrapper proto.InternalMessageInfo +// Deprecated: Use Wrapper.ProtoReflect.Descriptor instead. 
+func (*Wrapper) Descriptor() ([]byte, []int) { + return file_extensions_extension_proto_rawDescGZIP(), []int{3} +} -func (m *Wrapper) GetVersion() string { - if m != nil { - return m.Version +func (x *Wrapper) GetVersion() string { + if x != nil { + return x.Version } return "" } -func (m *Wrapper) GetExtensionName() string { - if m != nil { - return m.ExtensionName +func (x *Wrapper) GetExtensionName() string { + if x != nil { + return x.ExtensionName } return "" } -func (m *Wrapper) GetYaml() string { - if m != nil { - return m.Yaml +func (x *Wrapper) GetYaml() string { + if x != nil { + return x.Yaml } return "" } -func init() { - proto.RegisterType((*Version)(nil), "openapiextension.v1.Version") - proto.RegisterType((*ExtensionHandlerRequest)(nil), "openapiextension.v1.ExtensionHandlerRequest") - proto.RegisterType((*ExtensionHandlerResponse)(nil), "openapiextension.v1.ExtensionHandlerResponse") - proto.RegisterType((*Wrapper)(nil), "openapiextension.v1.Wrapper") -} - -func init() { proto.RegisterFile("extensions/extension.proto", fileDescriptor_661e47e790f76671) } - -var fileDescriptor_661e47e790f76671 = []byte{ - // 362 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0x4d, 0x4b, 0xeb, 0x40, - 0x18, 0x85, 0x49, 0xbf, 0x72, 0x33, 0x97, 0xdb, 0x2b, 0x63, 0xd1, 0x58, 0x5c, 0x94, 0x80, 0x50, - 0x44, 0xa6, 0x54, 0xc1, 0x7d, 0x0b, 0x45, 0xdd, 0xd8, 0x32, 0x8b, 0xba, 0xb3, 0x4c, 0xd3, 0xb7, - 0x69, 0x24, 0x99, 0x19, 0x27, 0x1f, 0xb6, 0x7f, 0xc5, 0xa5, 0xbf, 0x54, 0x32, 0x93, 0xc4, 0x85, - 0xba, 0x9b, 0xf3, 0x70, 0xda, 0xf7, 0x9c, 0x13, 0xd4, 0x87, 0x7d, 0x0a, 0x3c, 0x09, 0x05, 0x4f, - 0x46, 0xf5, 0x93, 0x48, 0x25, 0x52, 0x81, 0x8f, 0x85, 0x04, 0xce, 0x64, 0xf8, 0xc5, 0xf3, 0x71, - 0xff, 0x2c, 0x10, 0x22, 0x88, 0x60, 0xa4, 0x2d, 0xeb, 0x6c, 0x3b, 0x62, 0xfc, 0x60, 0xfc, 0x9e, - 0x8f, 0xec, 0x25, 0xa8, 0xc2, 0x88, 0x7b, 0xa8, 0x1d, 0xb3, 0x17, 0xa1, 0x5c, 0x6b, 0x60, 0x0d, - 0xdb, 0xd4, 0x08, 0x4d, 0x43, 0x2e, 0x94, 0xdb, 0x28, 0x69, 0x21, 0x0a, 0x2a, 0x59, 0xea, 0xef, - 0xdc, 0xa6, 0xa1, 0x5a, 0xe0, 0x13, 0xd4, 0x49, 0xb2, 0xed, 0x36, 0xdc, 0xbb, 0xad, 0x81, 0x35, - 0x74, 0x68, 0xa9, 0xbc, 0x77, 0x0b, 0x9d, 0xce, 0xaa, 0x40, 0xf7, 0x8c, 0x6f, 0x22, 0x50, 0x14, - 0x5e, 0x33, 0x48, 0x52, 0x7c, 0x8b, 0xec, 0x37, 0xc5, 0xa4, 0x04, 0x73, 0xf7, 0xef, 0xf5, 0x39, - 0xf9, 0xa1, 0x02, 0x79, 0x32, 0x1e, 0x5a, 0x99, 0xf1, 0x1d, 0x3a, 0xf2, 0x45, 0x2c, 0xc3, 0x08, - 0xd4, 0x2a, 0x37, 0x0d, 0x74, 0x98, 0xdf, 0xfe, 0xa0, 0x6c, 0x49, 0xff, 0x57, 0xbf, 0x2a, 0x81, - 0x97, 0x23, 0xf7, 0x7b, 0xb6, 0x44, 0x0a, 0x9e, 0x00, 0x76, 0x91, 0xbd, 0xd3, 0x68, 0xa3, 0xc3, - 0xfd, 0xa1, 0x95, 0x2c, 0x06, 0x00, 0xa5, 0xf4, 0x2c, 0xcd, 0xa1, 0x43, 0x8d, 0xc0, 0x97, 0xa8, - 0x9d, 0xb3, 0x28, 0x83, 0x32, 0x49, 0x8f, 0x98, 0xe1, 0x49, 0x35, 0x3c, 0x99, 0xf0, 0x03, 0x35, - 0x16, 0xef, 0x19, 0xd9, 0x65, 0xa9, 0xe2, 0x4c, 0x55, 0xc1, 0xd2, 0xc3, 0x55, 0x12, 0x5f, 0xa0, - 0x6e, 0xdd, 0x62, 0xc5, 0x59, 0x0c, 0xfa, 0x33, 0x38, 0xf4, 0x5f, 0x4d, 0x1f, 0x59, 0x0c, 0x18, - 0xa3, 0xd6, 0x81, 0xc5, 0x91, 0x3e, 0xeb, 0x50, 0xfd, 0x9e, 0x5e, 0xa1, 0xae, 0x50, 0x01, 0x09, - 0xb8, 0x48, 0xd2, 0xd0, 0x27, 0xf9, 0x78, 0x8a, 0xe7, 0x12, 0xf8, 0x64, 0xf1, 0x50, 0xd7, 0x5d, - 0x8e, 0x17, 0xd6, 0x47, 0xa3, 0x39, 0x9f, 0xcc, 0xd6, 0x1d, 0x1d, 0xf1, 0xe6, 0x33, 0x00, 0x00, - 0xff, 0xff, 0xeb, 0xf3, 0xfa, 0x65, 0x5c, 0x02, 0x00, 0x00, +var File_extensions_extension_proto protoreflect.FileDescriptor + +var file_extensions_extension_proto_rawDesc = []byte{ + 0x0a, 0x1a, 0x65, 0x78, 
0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6e, + 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, + 0x76, 0x31, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x63, 0x0a, + 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x61, 0x6a, 0x6f, + 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x12, 0x14, + 0x0a, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6d, + 0x69, 0x6e, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x75, + 0x66, 0x66, 0x69, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x75, 0x66, 0x66, + 0x69, 0x78, 0x22, 0x9c, 0x01, 0x0a, 0x17, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x37, + 0x0a, 0x07, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1d, 0x2e, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x52, 0x07, + 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x12, 0x48, 0x0a, 0x10, 0x63, 0x6f, 0x6d, 0x70, 0x69, + 0x6c, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x52, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x22, 0x78, 0x0a, 0x18, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x48, 0x61, + 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, + 0x07, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, + 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x12, + 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x5e, 0x0a, 0x07, 0x57, + 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x25, 0x0a, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x79, 0x61, 0x6d, 0x6c, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x79, 0x61, 0x6d, 0x6c, 0x42, 0x4b, 0x0a, 0x0e, 0x6f, + 0x72, 0x67, 0x2e, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x76, 0x31, 0x42, 0x10, 0x47, + 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x45, 0x78, 
0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x50, + 0x01, 0x5a, 0x1f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x67, 0x6e, + 0x6f, 0x73, 0x74, 0x69, 0x63, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, + 0x76, 0x31, 0xa2, 0x02, 0x03, 0x47, 0x4e, 0x58, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_extensions_extension_proto_rawDescOnce sync.Once + file_extensions_extension_proto_rawDescData = file_extensions_extension_proto_rawDesc +) + +func file_extensions_extension_proto_rawDescGZIP() []byte { + file_extensions_extension_proto_rawDescOnce.Do(func() { + file_extensions_extension_proto_rawDescData = protoimpl.X.CompressGZIP(file_extensions_extension_proto_rawDescData) + }) + return file_extensions_extension_proto_rawDescData +} + +var file_extensions_extension_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_extensions_extension_proto_goTypes = []interface{}{ + (*Version)(nil), // 0: gnostic.extension.v1.Version + (*ExtensionHandlerRequest)(nil), // 1: gnostic.extension.v1.ExtensionHandlerRequest + (*ExtensionHandlerResponse)(nil), // 2: gnostic.extension.v1.ExtensionHandlerResponse + (*Wrapper)(nil), // 3: gnostic.extension.v1.Wrapper + (*any.Any)(nil), // 4: google.protobuf.Any +} +var file_extensions_extension_proto_depIdxs = []int32{ + 3, // 0: gnostic.extension.v1.ExtensionHandlerRequest.wrapper:type_name -> gnostic.extension.v1.Wrapper + 0, // 1: gnostic.extension.v1.ExtensionHandlerRequest.compiler_version:type_name -> gnostic.extension.v1.Version + 4, // 2: gnostic.extension.v1.ExtensionHandlerResponse.value:type_name -> google.protobuf.Any + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_extensions_extension_proto_init() } +func file_extensions_extension_proto_init() { + if File_extensions_extension_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_extensions_extension_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Version); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_extensions_extension_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExtensionHandlerRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_extensions_extension_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExtensionHandlerResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_extensions_extension_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Wrapper); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_extensions_extension_proto_rawDesc, + NumEnums: 0, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_extensions_extension_proto_goTypes, + DependencyIndexes: 
file_extensions_extension_proto_depIdxs, + MessageInfos: file_extensions_extension_proto_msgTypes, + }.Build() + File_extensions_extension_proto = out.File + file_extensions_extension_proto_rawDesc = nil + file_extensions_extension_proto_goTypes = nil + file_extensions_extension_proto_depIdxs = nil } diff --git a/vendor/github.com/googleapis/gnostic/extensions/extension.proto b/vendor/github.com/googleapis/gnostic/extensions/extension.proto index 04856f913..8ac1faffc 100644 --- a/vendor/github.com/googleapis/gnostic/extensions/extension.proto +++ b/vendor/github.com/googleapis/gnostic/extensions/extension.proto @@ -1,4 +1,4 @@ -// Copyright 2017 Google Inc. All Rights Reserved. +// Copyright 2017 Google LLC. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,8 +14,9 @@ syntax = "proto3"; +package gnostic.extension.v1; + import "google/protobuf/any.proto"; -package openapiextension.v1; // This option lets the proto compiler generate Java code inside the package // name (see below) instead of inside an outer class. It creates a simpler @@ -26,7 +27,7 @@ option java_multiple_files = true; // The Java outer classname should be the filename in UpperCamelCase. This // class is only used to hold proto descriptor, so developers don't need to // work with it directly. -option java_outer_classname = "OpenAPIExtensionV1"; +option java_outer_classname = "GnosticExtension"; // The Java package name must be proto package name with proper prefix. option java_package = "org.gnostic.v1"; @@ -37,9 +38,12 @@ option java_package = "org.gnostic.v1"; // hopefully unique enough to not conflict with things that may come along in // the future. 'GPB' is reserved for the protocol buffer implementation itself. // -option objc_class_prefix = "OAE"; // "OpenAPI Extension" +option objc_class_prefix = "GNX"; // "Gnostic Extension" + +// The Go package name. +option go_package = "extensions;gnostic_extension_v1"; -// The version number of OpenAPI compiler. +// The version number of Gnostic. message Version { int32 major = 1; int32 minor = 2; @@ -52,12 +56,11 @@ message Version { // An encoded Request is written to the ExtensionHandler's stdin. message ExtensionHandlerRequest { - // The OpenAPI descriptions that were explicitly listed on the command line. - // The specifications will appear in the order they are specified to gnostic. + // The extension to process. Wrapper wrapper = 1; - // The version number of openapi compiler. - Version compiler_version = 3; + // The version number of Gnostic. + Version compiler_version = 2; } // The extensions writes an encoded ExtensionHandlerResponse to stdout. @@ -66,7 +69,7 @@ message ExtensionHandlerResponse { // true if the extension is handled by the extension handler; false otherwise bool handled = 1; - // Error message. If non-empty, the extension handling failed. + // Error message(s). If non-empty, the extension handling failed. // The extension handler process should exit with status code zero // even if it reports an error in this way. // @@ -75,7 +78,7 @@ message ExtensionHandlerResponse { // itself -- such as the input Document being unparseable -- should be // reported by writing a message to stderr and exiting with a non-zero // status code. - repeated string error = 2; + repeated string errors = 2; // text output google.protobuf.Any value = 3; @@ -85,9 +88,9 @@ message Wrapper { // version of the OpenAPI specification in which this extension was written. 
string version = 1; - // Name of the extension + // Name of the extension. string extension_name = 2; - // Must be a valid yaml for the proto + // YAML-formatted extension value. string yaml = 3; } diff --git a/vendor/github.com/googleapis/gnostic/extensions/extensions.go b/vendor/github.com/googleapis/gnostic/extensions/extensions.go index 94a8e62a7..ec8afd009 100644 --- a/vendor/github.com/googleapis/gnostic/extensions/extensions.go +++ b/vendor/github.com/googleapis/gnostic/extensions/extensions.go @@ -1,4 +1,4 @@ -// Copyright 2017 Google Inc. All Rights Reserved. +// Copyright 2017 Google LLC. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,71 +12,53 @@ // See the License for the specific language governing permissions and // limitations under the License. -package openapiextension_v1 +package gnostic_extension_v1 import ( - "fmt" "io/ioutil" + "log" "os" "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" ) -type documentHandler func(version string, extensionName string, document string) type extensionHandler func(name string, yamlInput string) (bool, proto.Message, error) -func forInputYamlFromOpenapic(handler documentHandler) { +// Main implements the main program of an extension handler. +func Main(handler extensionHandler) { + // unpack the request data, err := ioutil.ReadAll(os.Stdin) if err != nil { - fmt.Println("File error:", err.Error()) + log.Println("File error:", err.Error()) os.Exit(1) } if len(data) == 0 { - fmt.Println("No input data.") + log.Println("No input data.") os.Exit(1) } request := &ExtensionHandlerRequest{} err = proto.Unmarshal(data, request) if err != nil { - fmt.Println("Input error:", err.Error()) + log.Println("Input error:", err.Error()) os.Exit(1) } - handler(request.Wrapper.Version, request.Wrapper.ExtensionName, request.Wrapper.Yaml) -} - -// ProcessExtension calles the handler for a specified extension. 
-func ProcessExtension(handleExtension extensionHandler) { - response := &ExtensionHandlerResponse{} - forInputYamlFromOpenapic( - func(version string, extensionName string, yamlInput string) { - var newObject proto.Message - var err error - - handled, newObject, err := handleExtension(extensionName, yamlInput) - if !handled { - responseBytes, _ := proto.Marshal(response) - os.Stdout.Write(responseBytes) - os.Exit(0) - } - - // If we reach here, then the extension is handled - response.Handled = true - if err != nil { - response.Error = append(response.Error, err.Error()) - responseBytes, _ := proto.Marshal(response) - os.Stdout.Write(responseBytes) - os.Exit(0) - } - response.Value, err = ptypes.MarshalAny(newObject) - if err != nil { - response.Error = append(response.Error, err.Error()) - responseBytes, _ := proto.Marshal(response) - os.Stdout.Write(responseBytes) - os.Exit(0) - } - }) - + // call the handler + handled, output, err := handler(request.Wrapper.ExtensionName, request.Wrapper.Yaml) + // respond with the output of the handler + response := &ExtensionHandlerResponse{ + Handled: false, // default assumption + Errors: make([]string, 0), + } + if err != nil { + response.Errors = append(response.Errors, err.Error()) + } else if handled { + response.Handled = true + response.Value, err = ptypes.MarshalAny(output) + if err != nil { + response.Errors = append(response.Errors, err.Error()) + } + } responseBytes, _ := proto.Marshal(response) os.Stdout.Write(responseBytes) } diff --git a/vendor/github.com/googleapis/gnostic/jsonschema/README.md b/vendor/github.com/googleapis/gnostic/jsonschema/README.md new file mode 100644 index 000000000..6793c5179 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/jsonschema/README.md @@ -0,0 +1,4 @@ +# jsonschema + +This directory contains code for reading, writing, and manipulating JSON +schemas. diff --git a/vendor/github.com/googleapis/gnostic/jsonschema/base.go b/vendor/github.com/googleapis/gnostic/jsonschema/base.go new file mode 100644 index 000000000..0af8b148b --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/jsonschema/base.go @@ -0,0 +1,84 @@ + +// THIS FILE IS AUTOMATICALLY GENERATED. 
+ +package jsonschema + +import ( + "encoding/base64" +) + +func baseSchemaBytes() ([]byte, error){ + return base64.StdEncoding.DecodeString( +`ewogICAgImlkIjogImh0dHA6Ly9qc29uLXNjaGVtYS5vcmcvZHJhZnQtMDQvc2NoZW1hIyIsCiAgICAi +JHNjaGVtYSI6ICJodHRwOi8vanNvbi1zY2hlbWEub3JnL2RyYWZ0LTA0L3NjaGVtYSMiLAogICAgImRl +c2NyaXB0aW9uIjogIkNvcmUgc2NoZW1hIG1ldGEtc2NoZW1hIiwKICAgICJkZWZpbml0aW9ucyI6IHsK +ICAgICAgICAic2NoZW1hQXJyYXkiOiB7CiAgICAgICAgICAgICJ0eXBlIjogImFycmF5IiwKICAgICAg +ICAgICAgIm1pbkl0ZW1zIjogMSwKICAgICAgICAgICAgIml0ZW1zIjogeyAiJHJlZiI6ICIjIiB9CiAg +ICAgICAgfSwKICAgICAgICAicG9zaXRpdmVJbnRlZ2VyIjogewogICAgICAgICAgICAidHlwZSI6ICJp +bnRlZ2VyIiwKICAgICAgICAgICAgIm1pbmltdW0iOiAwCiAgICAgICAgfSwKICAgICAgICAicG9zaXRp +dmVJbnRlZ2VyRGVmYXVsdDAiOiB7CiAgICAgICAgICAgICJhbGxPZiI6IFsgeyAiJHJlZiI6ICIjL2Rl +ZmluaXRpb25zL3Bvc2l0aXZlSW50ZWdlciIgfSwgeyAiZGVmYXVsdCI6IDAgfSBdCiAgICAgICAgfSwK +ICAgICAgICAic2ltcGxlVHlwZXMiOiB7CiAgICAgICAgICAgICJlbnVtIjogWyAiYXJyYXkiLCAiYm9v +bGVhbiIsICJpbnRlZ2VyIiwgIm51bGwiLCAibnVtYmVyIiwgIm9iamVjdCIsICJzdHJpbmciIF0KICAg +ICAgICB9LAogICAgICAgICJzdHJpbmdBcnJheSI6IHsKICAgICAgICAgICAgInR5cGUiOiAiYXJyYXki +LAogICAgICAgICAgICAiaXRlbXMiOiB7ICJ0eXBlIjogInN0cmluZyIgfSwKICAgICAgICAgICAgIm1p +bkl0ZW1zIjogMSwKICAgICAgICAgICAgInVuaXF1ZUl0ZW1zIjogdHJ1ZQogICAgICAgIH0KICAgIH0s +CiAgICAidHlwZSI6ICJvYmplY3QiLAogICAgInByb3BlcnRpZXMiOiB7CiAgICAgICAgImlkIjogewog +ICAgICAgICAgICAidHlwZSI6ICJzdHJpbmciLAogICAgICAgICAgICAiZm9ybWF0IjogInVyaSIKICAg +ICAgICB9LAogICAgICAgICIkc2NoZW1hIjogewogICAgICAgICAgICAidHlwZSI6ICJzdHJpbmciLAog +ICAgICAgICAgICAiZm9ybWF0IjogInVyaSIKICAgICAgICB9LAogICAgICAgICJ0aXRsZSI6IHsKICAg +ICAgICAgICAgInR5cGUiOiAic3RyaW5nIgogICAgICAgIH0sCiAgICAgICAgImRlc2NyaXB0aW9uIjog +ewogICAgICAgICAgICAidHlwZSI6ICJzdHJpbmciCiAgICAgICAgfSwKICAgICAgICAiZGVmYXVsdCI6 +IHt9LAogICAgICAgICJtdWx0aXBsZU9mIjogewogICAgICAgICAgICAidHlwZSI6ICJudW1iZXIiLAog +ICAgICAgICAgICAibWluaW11bSI6IDAsCiAgICAgICAgICAgICJleGNsdXNpdmVNaW5pbXVtIjogdHJ1 +ZQogICAgICAgIH0sCiAgICAgICAgIm1heGltdW0iOiB7CiAgICAgICAgICAgICJ0eXBlIjogIm51bWJl +ciIKICAgICAgICB9LAogICAgICAgICJleGNsdXNpdmVNYXhpbXVtIjogewogICAgICAgICAgICAidHlw +ZSI6ICJib29sZWFuIiwKICAgICAgICAgICAgImRlZmF1bHQiOiBmYWxzZQogICAgICAgIH0sCiAgICAg +ICAgIm1pbmltdW0iOiB7CiAgICAgICAgICAgICJ0eXBlIjogIm51bWJlciIKICAgICAgICB9LAogICAg +ICAgICJleGNsdXNpdmVNaW5pbXVtIjogewogICAgICAgICAgICAidHlwZSI6ICJib29sZWFuIiwKICAg +ICAgICAgICAgImRlZmF1bHQiOiBmYWxzZQogICAgICAgIH0sCiAgICAgICAgIm1heExlbmd0aCI6IHsg +IiRyZWYiOiAiIy9kZWZpbml0aW9ucy9wb3NpdGl2ZUludGVnZXIiIH0sCiAgICAgICAgIm1pbkxlbmd0 +aCI6IHsgIiRyZWYiOiAiIy9kZWZpbml0aW9ucy9wb3NpdGl2ZUludGVnZXJEZWZhdWx0MCIgfSwKICAg +ICAgICAicGF0dGVybiI6IHsKICAgICAgICAgICAgInR5cGUiOiAic3RyaW5nIiwKICAgICAgICAgICAg +ImZvcm1hdCI6ICJyZWdleCIKICAgICAgICB9LAogICAgICAgICJhZGRpdGlvbmFsSXRlbXMiOiB7CiAg +ICAgICAgICAgICJhbnlPZiI6IFsKICAgICAgICAgICAgICAgIHsgInR5cGUiOiAiYm9vbGVhbiIgfSwK +ICAgICAgICAgICAgICAgIHsgIiRyZWYiOiAiIyIgfQogICAgICAgICAgICBdLAogICAgICAgICAgICAi +ZGVmYXVsdCI6IHt9CiAgICAgICAgfSwKICAgICAgICAiaXRlbXMiOiB7CiAgICAgICAgICAgICJhbnlP +ZiI6IFsKICAgICAgICAgICAgICAgIHsgIiRyZWYiOiAiIyIgfSwKICAgICAgICAgICAgICAgIHsgIiRy +ZWYiOiAiIy9kZWZpbml0aW9ucy9zY2hlbWFBcnJheSIgfQogICAgICAgICAgICBdLAogICAgICAgICAg +ICAiZGVmYXVsdCI6IHt9CiAgICAgICAgfSwKICAgICAgICAibWF4SXRlbXMiOiB7ICIkcmVmIjogIiMv +ZGVmaW5pdGlvbnMvcG9zaXRpdmVJbnRlZ2VyIiB9LAogICAgICAgICJtaW5JdGVtcyI6IHsgIiRyZWYi +OiAiIy9kZWZpbml0aW9ucy9wb3NpdGl2ZUludGVnZXJEZWZhdWx0MCIgfSwKICAgICAgICAidW5pcXVl +SXRlbXMiOiB7CiAgICAgICAgICAgICJ0eXBlIjogImJvb2xlYW4iLAogICAgICAgICAgICAiZGVmYXVs 
+dCI6IGZhbHNlCiAgICAgICAgfSwKICAgICAgICAibWF4UHJvcGVydGllcyI6IHsgIiRyZWYiOiAiIy9k +ZWZpbml0aW9ucy9wb3NpdGl2ZUludGVnZXIiIH0sCiAgICAgICAgIm1pblByb3BlcnRpZXMiOiB7ICIk +cmVmIjogIiMvZGVmaW5pdGlvbnMvcG9zaXRpdmVJbnRlZ2VyRGVmYXVsdDAiIH0sCiAgICAgICAgInJl +cXVpcmVkIjogeyAiJHJlZiI6ICIjL2RlZmluaXRpb25zL3N0cmluZ0FycmF5IiB9LAogICAgICAgICJh +ZGRpdGlvbmFsUHJvcGVydGllcyI6IHsKICAgICAgICAgICAgImFueU9mIjogWwogICAgICAgICAgICAg +ICAgeyAidHlwZSI6ICJib29sZWFuIiB9LAogICAgICAgICAgICAgICAgeyAiJHJlZiI6ICIjIiB9CiAg +ICAgICAgICAgIF0sCiAgICAgICAgICAgICJkZWZhdWx0Ijoge30KICAgICAgICB9LAogICAgICAgICJk +ZWZpbml0aW9ucyI6IHsKICAgICAgICAgICAgInR5cGUiOiAib2JqZWN0IiwKICAgICAgICAgICAgImFk +ZGl0aW9uYWxQcm9wZXJ0aWVzIjogeyAiJHJlZiI6ICIjIiB9LAogICAgICAgICAgICAiZGVmYXVsdCI6 +IHt9CiAgICAgICAgfSwKICAgICAgICAicHJvcGVydGllcyI6IHsKICAgICAgICAgICAgInR5cGUiOiAi +b2JqZWN0IiwKICAgICAgICAgICAgImFkZGl0aW9uYWxQcm9wZXJ0aWVzIjogeyAiJHJlZiI6ICIjIiB9 +LAogICAgICAgICAgICAiZGVmYXVsdCI6IHt9CiAgICAgICAgfSwKICAgICAgICAicGF0dGVyblByb3Bl +cnRpZXMiOiB7CiAgICAgICAgICAgICJ0eXBlIjogIm9iamVjdCIsCiAgICAgICAgICAgICJhZGRpdGlv +bmFsUHJvcGVydGllcyI6IHsgIiRyZWYiOiAiIyIgfSwKICAgICAgICAgICAgImRlZmF1bHQiOiB7fQog +ICAgICAgIH0sCiAgICAgICAgImRlcGVuZGVuY2llcyI6IHsKICAgICAgICAgICAgInR5cGUiOiAib2Jq +ZWN0IiwKICAgICAgICAgICAgImFkZGl0aW9uYWxQcm9wZXJ0aWVzIjogewogICAgICAgICAgICAgICAg +ImFueU9mIjogWwogICAgICAgICAgICAgICAgICAgIHsgIiRyZWYiOiAiIyIgfSwKICAgICAgICAgICAg +ICAgICAgICB7ICIkcmVmIjogIiMvZGVmaW5pdGlvbnMvc3RyaW5nQXJyYXkiIH0KICAgICAgICAgICAg +ICAgIF0KICAgICAgICAgICAgfQogICAgICAgIH0sCiAgICAgICAgImVudW0iOiB7CiAgICAgICAgICAg +ICJ0eXBlIjogImFycmF5IiwKICAgICAgICAgICAgIm1pbkl0ZW1zIjogMSwKICAgICAgICAgICAgInVu +aXF1ZUl0ZW1zIjogdHJ1ZQogICAgICAgIH0sCiAgICAgICAgInR5cGUiOiB7CiAgICAgICAgICAgICJh +bnlPZiI6IFsKICAgICAgICAgICAgICAgIHsgIiRyZWYiOiAiIy9kZWZpbml0aW9ucy9zaW1wbGVUeXBl +cyIgfSwKICAgICAgICAgICAgICAgIHsKICAgICAgICAgICAgICAgICAgICAidHlwZSI6ICJhcnJheSIs +CiAgICAgICAgICAgICAgICAgICAgIml0ZW1zIjogeyAiJHJlZiI6ICIjL2RlZmluaXRpb25zL3NpbXBs +ZVR5cGVzIiB9LAogICAgICAgICAgICAgICAgICAgICJtaW5JdGVtcyI6IDEsCiAgICAgICAgICAgICAg +ICAgICAgInVuaXF1ZUl0ZW1zIjogdHJ1ZQogICAgICAgICAgICAgICAgfQogICAgICAgICAgICBdCiAg +ICAgICAgfSwKICAgICAgICAiYWxsT2YiOiB7ICIkcmVmIjogIiMvZGVmaW5pdGlvbnMvc2NoZW1hQXJy +YXkiIH0sCiAgICAgICAgImFueU9mIjogeyAiJHJlZiI6ICIjL2RlZmluaXRpb25zL3NjaGVtYUFycmF5 +IiB9LAogICAgICAgICJvbmVPZiI6IHsgIiRyZWYiOiAiIy9kZWZpbml0aW9ucy9zY2hlbWFBcnJheSIg +fSwKICAgICAgICAibm90IjogeyAiJHJlZiI6ICIjIiB9CiAgICB9LAogICAgImRlcGVuZGVuY2llcyI6 +IHsKICAgICAgICAiZXhjbHVzaXZlTWF4aW11bSI6IFsgIm1heGltdW0iIF0sCiAgICAgICAgImV4Y2x1 +c2l2ZU1pbmltdW0iOiBbICJtaW5pbXVtIiBdCiAgICB9LAogICAgImRlZmF1bHQiOiB7fQp9Cg==`)} \ No newline at end of file diff --git a/vendor/github.com/googleapis/gnostic/jsonschema/display.go b/vendor/github.com/googleapis/gnostic/jsonschema/display.go new file mode 100644 index 000000000..028a760a9 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/jsonschema/display.go @@ -0,0 +1,229 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package jsonschema + +import ( + "fmt" + "strings" +) + +// +// DISPLAY +// The following methods display Schemas. +// + +// Description returns a string representation of a string or string array. +func (s *StringOrStringArray) Description() string { + if s.String != nil { + return *s.String + } + if s.StringArray != nil { + return strings.Join(*s.StringArray, ", ") + } + return "" +} + +// Returns a string representation of a Schema. +func (schema *Schema) String() string { + return schema.describeSchema("") +} + +// Helper: Returns a string representation of a Schema indented by a specified string. +func (schema *Schema) describeSchema(indent string) string { + result := "" + if schema.Schema != nil { + result += indent + "$schema: " + *(schema.Schema) + "\n" + } + if schema.ID != nil { + result += indent + "id: " + *(schema.ID) + "\n" + } + if schema.MultipleOf != nil { + result += indent + fmt.Sprintf("multipleOf: %+v\n", *(schema.MultipleOf)) + } + if schema.Maximum != nil { + result += indent + fmt.Sprintf("maximum: %+v\n", *(schema.Maximum)) + } + if schema.ExclusiveMaximum != nil { + result += indent + fmt.Sprintf("exclusiveMaximum: %+v\n", *(schema.ExclusiveMaximum)) + } + if schema.Minimum != nil { + result += indent + fmt.Sprintf("minimum: %+v\n", *(schema.Minimum)) + } + if schema.ExclusiveMinimum != nil { + result += indent + fmt.Sprintf("exclusiveMinimum: %+v\n", *(schema.ExclusiveMinimum)) + } + if schema.MaxLength != nil { + result += indent + fmt.Sprintf("maxLength: %+v\n", *(schema.MaxLength)) + } + if schema.MinLength != nil { + result += indent + fmt.Sprintf("minLength: %+v\n", *(schema.MinLength)) + } + if schema.Pattern != nil { + result += indent + fmt.Sprintf("pattern: %+v\n", *(schema.Pattern)) + } + if schema.AdditionalItems != nil { + s := schema.AdditionalItems.Schema + if s != nil { + result += indent + "additionalItems:\n" + result += s.describeSchema(indent + " ") + } else { + b := *(schema.AdditionalItems.Boolean) + result += indent + fmt.Sprintf("additionalItems: %+v\n", b) + } + } + if schema.Items != nil { + result += indent + "items:\n" + items := schema.Items + if items.SchemaArray != nil { + for i, s := range *(items.SchemaArray) { + result += indent + " " + fmt.Sprintf("%d", i) + ":\n" + result += s.describeSchema(indent + " " + " ") + } + } else if items.Schema != nil { + result += items.Schema.describeSchema(indent + " " + " ") + } + } + if schema.MaxItems != nil { + result += indent + fmt.Sprintf("maxItems: %+v\n", *(schema.MaxItems)) + } + if schema.MinItems != nil { + result += indent + fmt.Sprintf("minItems: %+v\n", *(schema.MinItems)) + } + if schema.UniqueItems != nil { + result += indent + fmt.Sprintf("uniqueItems: %+v\n", *(schema.UniqueItems)) + } + if schema.MaxProperties != nil { + result += indent + fmt.Sprintf("maxProperties: %+v\n", *(schema.MaxProperties)) + } + if schema.MinProperties != nil { + result += indent + fmt.Sprintf("minProperties: %+v\n", *(schema.MinProperties)) + } + if schema.Required != nil { + result += indent + fmt.Sprintf("required: %+v\n", *(schema.Required)) + } + if schema.AdditionalProperties != nil { + s := schema.AdditionalProperties.Schema + if s != nil { + result += indent + "additionalProperties:\n" + result += s.describeSchema(indent + " ") + } else { + b := *(schema.AdditionalProperties.Boolean) + result += indent + fmt.Sprintf("additionalProperties: %+v\n", b) + } + } + if schema.Properties != nil { + result += indent + "properties:\n" + for _, pair := range *(schema.Properties) { + name := pair.Name 
+ s := pair.Value + result += indent + " " + name + ":\n" + result += s.describeSchema(indent + " " + " ") + } + } + if schema.PatternProperties != nil { + result += indent + "patternProperties:\n" + for _, pair := range *(schema.PatternProperties) { + name := pair.Name + s := pair.Value + result += indent + " " + name + ":\n" + result += s.describeSchema(indent + " " + " ") + } + } + if schema.Dependencies != nil { + result += indent + "dependencies:\n" + for _, pair := range *(schema.Dependencies) { + name := pair.Name + schemaOrStringArray := pair.Value + s := schemaOrStringArray.Schema + if s != nil { + result += indent + " " + name + ":\n" + result += s.describeSchema(indent + " " + " ") + } else { + a := schemaOrStringArray.StringArray + if a != nil { + result += indent + " " + name + ":\n" + for _, s2 := range *a { + result += indent + " " + " " + s2 + "\n" + } + } + } + + } + } + if schema.Enumeration != nil { + result += indent + "enumeration:\n" + for _, value := range *(schema.Enumeration) { + if value.String != nil { + result += indent + " " + fmt.Sprintf("%+v\n", *value.String) + } else { + result += indent + " " + fmt.Sprintf("%+v\n", *value.Bool) + } + } + } + if schema.Type != nil { + result += indent + fmt.Sprintf("type: %+v\n", schema.Type.Description()) + } + if schema.AllOf != nil { + result += indent + "allOf:\n" + for _, s := range *(schema.AllOf) { + result += s.describeSchema(indent + " ") + result += indent + "-\n" + } + } + if schema.AnyOf != nil { + result += indent + "anyOf:\n" + for _, s := range *(schema.AnyOf) { + result += s.describeSchema(indent + " ") + result += indent + "-\n" + } + } + if schema.OneOf != nil { + result += indent + "oneOf:\n" + for _, s := range *(schema.OneOf) { + result += s.describeSchema(indent + " ") + result += indent + "-\n" + } + } + if schema.Not != nil { + result += indent + "not:\n" + result += schema.Not.describeSchema(indent + " ") + } + if schema.Definitions != nil { + result += indent + "definitions:\n" + for _, pair := range *(schema.Definitions) { + name := pair.Name + s := pair.Value + result += indent + " " + name + ":\n" + result += s.describeSchema(indent + " " + " ") + } + } + if schema.Title != nil { + result += indent + "title: " + *(schema.Title) + "\n" + } + if schema.Description != nil { + result += indent + "description: " + *(schema.Description) + "\n" + } + if schema.Default != nil { + result += indent + "default:\n" + result += indent + fmt.Sprintf(" %+v\n", *(schema.Default)) + } + if schema.Format != nil { + result += indent + "format: " + *(schema.Format) + "\n" + } + if schema.Ref != nil { + result += indent + "$ref: " + *(schema.Ref) + "\n" + } + return result +} diff --git a/vendor/github.com/googleapis/gnostic/jsonschema/models.go b/vendor/github.com/googleapis/gnostic/jsonschema/models.go new file mode 100644 index 000000000..4781bdc5f --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/jsonschema/models.go @@ -0,0 +1,228 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Package jsonschema supports the reading, writing, and manipulation +// of JSON Schemas. +package jsonschema + +import "gopkg.in/yaml.v3" + +// The Schema struct models a JSON Schema and, because schemas are +// defined hierarchically, contains many references to itself. +// All fields are pointers and are nil if the associated values +// are not specified. +type Schema struct { + Schema *string // $schema + ID *string // id keyword used for $ref resolution scope + Ref *string // $ref, i.e. JSON Pointers + + // http://json-schema.org/latest/json-schema-validation.html + // 5.1. Validation keywords for numeric instances (number and integer) + MultipleOf *SchemaNumber + Maximum *SchemaNumber + ExclusiveMaximum *bool + Minimum *SchemaNumber + ExclusiveMinimum *bool + + // 5.2. Validation keywords for strings + MaxLength *int64 + MinLength *int64 + Pattern *string + + // 5.3. Validation keywords for arrays + AdditionalItems *SchemaOrBoolean + Items *SchemaOrSchemaArray + MaxItems *int64 + MinItems *int64 + UniqueItems *bool + + // 5.4. Validation keywords for objects + MaxProperties *int64 + MinProperties *int64 + Required *[]string + AdditionalProperties *SchemaOrBoolean + Properties *[]*NamedSchema + PatternProperties *[]*NamedSchema + Dependencies *[]*NamedSchemaOrStringArray + + // 5.5. Validation keywords for any instance type + Enumeration *[]SchemaEnumValue + Type *StringOrStringArray + AllOf *[]*Schema + AnyOf *[]*Schema + OneOf *[]*Schema + Not *Schema + Definitions *[]*NamedSchema + + // 6. Metadata keywords + Title *string + Description *string + Default *yaml.Node + + // 7. Semantic validation with "format" + Format *string +} + +// These helper structs represent "combination" types that generally can +// have values of one type or another. All are used to represent parts +// of Schemas. + +// SchemaNumber represents a value that can be either an Integer or a Float. +type SchemaNumber struct { + Integer *int64 + Float *float64 +} + +// NewSchemaNumberWithInteger creates and returns a new object +func NewSchemaNumberWithInteger(i int64) *SchemaNumber { + result := &SchemaNumber{} + result.Integer = &i + return result +} + +// NewSchemaNumberWithFloat creates and returns a new object +func NewSchemaNumberWithFloat(f float64) *SchemaNumber { + result := &SchemaNumber{} + result.Float = &f + return result +} + +// SchemaOrBoolean represents a value that can be either a Schema or a Boolean. +type SchemaOrBoolean struct { + Schema *Schema + Boolean *bool +} + +// NewSchemaOrBooleanWithSchema creates and returns a new object +func NewSchemaOrBooleanWithSchema(s *Schema) *SchemaOrBoolean { + result := &SchemaOrBoolean{} + result.Schema = s + return result +} + +// NewSchemaOrBooleanWithBoolean creates and returns a new object +func NewSchemaOrBooleanWithBoolean(b bool) *SchemaOrBoolean { + result := &SchemaOrBoolean{} + result.Boolean = &b + return result +} + +// StringOrStringArray represents a value that can be either +// a String or an Array of Strings. 
+type StringOrStringArray struct { + String *string + StringArray *[]string +} + +// NewStringOrStringArrayWithString creates and returns a new object +func NewStringOrStringArrayWithString(s string) *StringOrStringArray { + result := &StringOrStringArray{} + result.String = &s + return result +} + +// NewStringOrStringArrayWithStringArray creates and returns a new object +func NewStringOrStringArrayWithStringArray(a []string) *StringOrStringArray { + result := &StringOrStringArray{} + result.StringArray = &a + return result +} + +// SchemaOrStringArray represents a value that can be either +// a Schema or an Array of Strings. +type SchemaOrStringArray struct { + Schema *Schema + StringArray *[]string +} + +// SchemaOrSchemaArray represents a value that can be either +// a Schema or an Array of Schemas. +type SchemaOrSchemaArray struct { + Schema *Schema + SchemaArray *[]*Schema +} + +// NewSchemaOrSchemaArrayWithSchema creates and returns a new object +func NewSchemaOrSchemaArrayWithSchema(s *Schema) *SchemaOrSchemaArray { + result := &SchemaOrSchemaArray{} + result.Schema = s + return result +} + +// NewSchemaOrSchemaArrayWithSchemaArray creates and returns a new object +func NewSchemaOrSchemaArrayWithSchemaArray(a []*Schema) *SchemaOrSchemaArray { + result := &SchemaOrSchemaArray{} + result.SchemaArray = &a + return result +} + +// SchemaEnumValue represents a value that can be part of an +// enumeration in a Schema. +type SchemaEnumValue struct { + String *string + Bool *bool +} + +// NamedSchema is a name-value pair that is used to emulate maps +// with ordered keys. +type NamedSchema struct { + Name string + Value *Schema +} + +// NewNamedSchema creates and returns a new object +func NewNamedSchema(name string, value *Schema) *NamedSchema { + return &NamedSchema{Name: name, Value: value} +} + +// NamedSchemaOrStringArray is a name-value pair that is used +// to emulate maps with ordered keys. +type NamedSchemaOrStringArray struct { + Name string + Value *SchemaOrStringArray +} + +// Access named subschemas by name + +func namedSchemaArrayElementWithName(array *[]*NamedSchema, name string) *Schema { + if array == nil { + return nil + } + for _, pair := range *array { + if pair.Name == name { + return pair.Value + } + } + return nil +} + +// PropertyWithName returns the selected element. +func (s *Schema) PropertyWithName(name string) *Schema { + return namedSchemaArrayElementWithName(s.Properties, name) +} + +// PatternPropertyWithName returns the selected element. +func (s *Schema) PatternPropertyWithName(name string) *Schema { + return namedSchemaArrayElementWithName(s.PatternProperties, name) +} + +// DefinitionWithName returns the selected element. +func (s *Schema) DefinitionWithName(name string) *Schema { + return namedSchemaArrayElementWithName(s.Definitions, name) +} + +// AddProperty adds a named property. +func (s *Schema) AddProperty(name string, property *Schema) { + *s.Properties = append(*s.Properties, NewNamedSchema(name, property)) +} diff --git a/vendor/github.com/googleapis/gnostic/jsonschema/operations.go b/vendor/github.com/googleapis/gnostic/jsonschema/operations.go new file mode 100644 index 000000000..ba8dd4a91 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/jsonschema/operations.go @@ -0,0 +1,394 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jsonschema + +import ( + "fmt" + "log" + "strings" +) + +// +// OPERATIONS +// The following methods perform operations on Schemas. +// + +// IsEmpty returns true if no members of the Schema are specified. +func (schema *Schema) IsEmpty() bool { + return (schema.Schema == nil) && + (schema.ID == nil) && + (schema.MultipleOf == nil) && + (schema.Maximum == nil) && + (schema.ExclusiveMaximum == nil) && + (schema.Minimum == nil) && + (schema.ExclusiveMinimum == nil) && + (schema.MaxLength == nil) && + (schema.MinLength == nil) && + (schema.Pattern == nil) && + (schema.AdditionalItems == nil) && + (schema.Items == nil) && + (schema.MaxItems == nil) && + (schema.MinItems == nil) && + (schema.UniqueItems == nil) && + (schema.MaxProperties == nil) && + (schema.MinProperties == nil) && + (schema.Required == nil) && + (schema.AdditionalProperties == nil) && + (schema.Properties == nil) && + (schema.PatternProperties == nil) && + (schema.Dependencies == nil) && + (schema.Enumeration == nil) && + (schema.Type == nil) && + (schema.AllOf == nil) && + (schema.AnyOf == nil) && + (schema.OneOf == nil) && + (schema.Not == nil) && + (schema.Definitions == nil) && + (schema.Title == nil) && + (schema.Description == nil) && + (schema.Default == nil) && + (schema.Format == nil) && + (schema.Ref == nil) +} + +// IsEqual returns true if two schemas are equal. +func (schema *Schema) IsEqual(schema2 *Schema) bool { + return schema.String() == schema2.String() +} + +// SchemaOperation represents a function that can be applied to a Schema. +type SchemaOperation func(schema *Schema, context string) + +// Applies a specified function to a Schema and all of the Schemas that it contains. 
+func (schema *Schema) applyToSchemas(operation SchemaOperation, context string) { + + if schema.AdditionalItems != nil { + s := schema.AdditionalItems.Schema + if s != nil { + s.applyToSchemas(operation, "AdditionalItems") + } + } + + if schema.Items != nil { + if schema.Items.SchemaArray != nil { + for _, s := range *(schema.Items.SchemaArray) { + s.applyToSchemas(operation, "Items.SchemaArray") + } + } else if schema.Items.Schema != nil { + schema.Items.Schema.applyToSchemas(operation, "Items.Schema") + } + } + + if schema.AdditionalProperties != nil { + s := schema.AdditionalProperties.Schema + if s != nil { + s.applyToSchemas(operation, "AdditionalProperties") + } + } + + if schema.Properties != nil { + for _, pair := range *(schema.Properties) { + s := pair.Value + s.applyToSchemas(operation, "Properties") + } + } + if schema.PatternProperties != nil { + for _, pair := range *(schema.PatternProperties) { + s := pair.Value + s.applyToSchemas(operation, "PatternProperties") + } + } + + if schema.Dependencies != nil { + for _, pair := range *(schema.Dependencies) { + schemaOrStringArray := pair.Value + s := schemaOrStringArray.Schema + if s != nil { + s.applyToSchemas(operation, "Dependencies") + } + } + } + + if schema.AllOf != nil { + for _, s := range *(schema.AllOf) { + s.applyToSchemas(operation, "AllOf") + } + } + if schema.AnyOf != nil { + for _, s := range *(schema.AnyOf) { + s.applyToSchemas(operation, "AnyOf") + } + } + if schema.OneOf != nil { + for _, s := range *(schema.OneOf) { + s.applyToSchemas(operation, "OneOf") + } + } + if schema.Not != nil { + schema.Not.applyToSchemas(operation, "Not") + } + + if schema.Definitions != nil { + for _, pair := range *(schema.Definitions) { + s := pair.Value + s.applyToSchemas(operation, "Definitions") + } + } + + operation(schema, context) +} + +// CopyProperties copies all non-nil properties from the source Schema to the schema Schema. 
+func (schema *Schema) CopyProperties(source *Schema) { + if source.Schema != nil { + schema.Schema = source.Schema + } + if source.ID != nil { + schema.ID = source.ID + } + if source.MultipleOf != nil { + schema.MultipleOf = source.MultipleOf + } + if source.Maximum != nil { + schema.Maximum = source.Maximum + } + if source.ExclusiveMaximum != nil { + schema.ExclusiveMaximum = source.ExclusiveMaximum + } + if source.Minimum != nil { + schema.Minimum = source.Minimum + } + if source.ExclusiveMinimum != nil { + schema.ExclusiveMinimum = source.ExclusiveMinimum + } + if source.MaxLength != nil { + schema.MaxLength = source.MaxLength + } + if source.MinLength != nil { + schema.MinLength = source.MinLength + } + if source.Pattern != nil { + schema.Pattern = source.Pattern + } + if source.AdditionalItems != nil { + schema.AdditionalItems = source.AdditionalItems + } + if source.Items != nil { + schema.Items = source.Items + } + if source.MaxItems != nil { + schema.MaxItems = source.MaxItems + } + if source.MinItems != nil { + schema.MinItems = source.MinItems + } + if source.UniqueItems != nil { + schema.UniqueItems = source.UniqueItems + } + if source.MaxProperties != nil { + schema.MaxProperties = source.MaxProperties + } + if source.MinProperties != nil { + schema.MinProperties = source.MinProperties + } + if source.Required != nil { + schema.Required = source.Required + } + if source.AdditionalProperties != nil { + schema.AdditionalProperties = source.AdditionalProperties + } + if source.Properties != nil { + schema.Properties = source.Properties + } + if source.PatternProperties != nil { + schema.PatternProperties = source.PatternProperties + } + if source.Dependencies != nil { + schema.Dependencies = source.Dependencies + } + if source.Enumeration != nil { + schema.Enumeration = source.Enumeration + } + if source.Type != nil { + schema.Type = source.Type + } + if source.AllOf != nil { + schema.AllOf = source.AllOf + } + if source.AnyOf != nil { + schema.AnyOf = source.AnyOf + } + if source.OneOf != nil { + schema.OneOf = source.OneOf + } + if source.Not != nil { + schema.Not = source.Not + } + if source.Definitions != nil { + schema.Definitions = source.Definitions + } + if source.Title != nil { + schema.Title = source.Title + } + if source.Description != nil { + schema.Description = source.Description + } + if source.Default != nil { + schema.Default = source.Default + } + if source.Format != nil { + schema.Format = source.Format + } + if source.Ref != nil { + schema.Ref = source.Ref + } +} + +// TypeIs returns true if the Type of a Schema includes the specified type +func (schema *Schema) TypeIs(typeName string) bool { + if schema.Type != nil { + // the schema Type is either a string or an array of strings + if schema.Type.String != nil { + return (*(schema.Type.String) == typeName) + } else if schema.Type.StringArray != nil { + for _, n := range *(schema.Type.StringArray) { + if n == typeName { + return true + } + } + } + } + return false +} + +// ResolveRefs resolves "$ref" elements in a Schema and its children. +// But if a reference refers to an object type, is inside a oneOf, or contains a oneOf, +// the reference is kept and we expect downstream tools to separately model these +// referenced schemas. 
+func (schema *Schema) ResolveRefs() { + rootSchema := schema + count := 1 + for count > 0 { + count = 0 + schema.applyToSchemas( + func(schema *Schema, context string) { + if schema.Ref != nil { + resolvedRef, err := rootSchema.resolveJSONPointer(*(schema.Ref)) + if err != nil { + log.Printf("%+v", err) + } else if resolvedRef.TypeIs("object") { + // don't substitute for objects, we'll model the referenced schema with a class + } else if context == "OneOf" { + // don't substitute for references inside oneOf declarations + } else if resolvedRef.OneOf != nil { + // don't substitute for references that contain oneOf declarations + } else if resolvedRef.AdditionalProperties != nil { + // don't substitute for references that look like objects + } else { + schema.Ref = nil + schema.CopyProperties(resolvedRef) + count++ + } + } + }, "") + } +} + +// resolveJSONPointer resolves JSON pointers. +// This current implementation is very crude and custom for OpenAPI 2.0 schemas. +// It panics for any pointer that it is unable to resolve. +func (schema *Schema) resolveJSONPointer(ref string) (result *Schema, err error) { + parts := strings.Split(ref, "#") + if len(parts) == 2 { + documentName := parts[0] + "#" + if documentName == "#" && schema.ID != nil { + documentName = *(schema.ID) + } + path := parts[1] + document := schemas[documentName] + pathParts := strings.Split(path, "/") + + // we currently do a very limited (hard-coded) resolution of certain paths and log errors for missed cases + if len(pathParts) == 1 { + return document, nil + } else if len(pathParts) == 3 { + switch pathParts[1] { + case "definitions": + dictionary := document.Definitions + for _, pair := range *dictionary { + if pair.Name == pathParts[2] { + result = pair.Value + } + } + case "properties": + dictionary := document.Properties + for _, pair := range *dictionary { + if pair.Name == pathParts[2] { + result = pair.Value + } + } + default: + break + } + } + } + if result == nil { + return nil, fmt.Errorf("unresolved pointer: %+v", ref) + } + return result, nil +} + +// ResolveAllOfs replaces "allOf" elements by merging their properties into the parent Schema. +func (schema *Schema) ResolveAllOfs() { + schema.applyToSchemas( + func(schema *Schema, context string) { + if schema.AllOf != nil { + for _, allOf := range *(schema.AllOf) { + schema.CopyProperties(allOf) + } + schema.AllOf = nil + } + }, "resolveAllOfs") +} + +// ResolveAnyOfs replaces all "anyOf" elements with "oneOf". 
+func (schema *Schema) ResolveAnyOfs() { + schema.applyToSchemas( + func(schema *Schema, context string) { + if schema.AnyOf != nil { + schema.OneOf = schema.AnyOf + schema.AnyOf = nil + } + }, "resolveAnyOfs") +} + +// return a pointer to a copy of a passed-in string +func stringptr(input string) (output *string) { + return &input +} + +// CopyOfficialSchemaProperty copies a named property from the official JSON Schema definition +func (schema *Schema) CopyOfficialSchemaProperty(name string) { + *schema.Properties = append(*schema.Properties, + NewNamedSchema(name, + &Schema{Ref: stringptr("http://json-schema.org/draft-04/schema#/properties/" + name)})) +} + +// CopyOfficialSchemaProperties copies named properties from the official JSON Schema definition +func (schema *Schema) CopyOfficialSchemaProperties(names []string) { + for _, name := range names { + schema.CopyOfficialSchemaProperty(name) + } +} diff --git a/vendor/github.com/googleapis/gnostic/jsonschema/reader.go b/vendor/github.com/googleapis/gnostic/jsonschema/reader.go new file mode 100644 index 000000000..b8583d466 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/jsonschema/reader.go @@ -0,0 +1,442 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:generate go run generate-base.go + +package jsonschema + +import ( + "fmt" + "io/ioutil" + "strconv" + + "gopkg.in/yaml.v3" +) + +// This is a global map of all known Schemas. +// It is initialized when the first Schema is created and inserted. +var schemas map[string]*Schema + +// NewBaseSchema builds a schema object from an embedded json representation. +func NewBaseSchema() (schema *Schema, err error) { + b, err := baseSchemaBytes() + if err != nil { + return nil, err + } + var node yaml.Node + err = yaml.Unmarshal(b, &node) + if err != nil { + return nil, err + } + return NewSchemaFromObject(&node), nil +} + +// NewSchemaFromFile reads a schema from a file. +// Currently this assumes that schemas are stored in the source distribution of this project. +func NewSchemaFromFile(filename string) (schema *Schema, err error) { + file, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + var node yaml.Node + err = yaml.Unmarshal(file, &node) + if err != nil { + return nil, err + } + return NewSchemaFromObject(&node), nil +} + +// NewSchemaFromObject constructs a schema from a parsed JSON object. +// Due to the complexity of the schema representation, this is a +// custom reader and not the standard Go JSON reader (encoding/json). 
+func NewSchemaFromObject(jsonData *yaml.Node) *Schema { + switch jsonData.Kind { + case yaml.DocumentNode: + return NewSchemaFromObject(jsonData.Content[0]) + case yaml.MappingNode: + schema := &Schema{} + + for i := 0; i < len(jsonData.Content); i += 2 { + k := jsonData.Content[i].Value + v := jsonData.Content[i+1] + + switch k { + case "$schema": + schema.Schema = schema.stringValue(v) + case "id": + schema.ID = schema.stringValue(v) + + case "multipleOf": + schema.MultipleOf = schema.numberValue(v) + case "maximum": + schema.Maximum = schema.numberValue(v) + case "exclusiveMaximum": + schema.ExclusiveMaximum = schema.boolValue(v) + case "minimum": + schema.Minimum = schema.numberValue(v) + case "exclusiveMinimum": + schema.ExclusiveMinimum = schema.boolValue(v) + + case "maxLength": + schema.MaxLength = schema.intValue(v) + case "minLength": + schema.MinLength = schema.intValue(v) + case "pattern": + schema.Pattern = schema.stringValue(v) + + case "additionalItems": + schema.AdditionalItems = schema.schemaOrBooleanValue(v) + case "items": + schema.Items = schema.schemaOrSchemaArrayValue(v) + case "maxItems": + schema.MaxItems = schema.intValue(v) + case "minItems": + schema.MinItems = schema.intValue(v) + case "uniqueItems": + schema.UniqueItems = schema.boolValue(v) + + case "maxProperties": + schema.MaxProperties = schema.intValue(v) + case "minProperties": + schema.MinProperties = schema.intValue(v) + case "required": + schema.Required = schema.arrayOfStringsValue(v) + case "additionalProperties": + schema.AdditionalProperties = schema.schemaOrBooleanValue(v) + case "properties": + schema.Properties = schema.mapOfSchemasValue(v) + case "patternProperties": + schema.PatternProperties = schema.mapOfSchemasValue(v) + case "dependencies": + schema.Dependencies = schema.mapOfSchemasOrStringArraysValue(v) + + case "enum": + schema.Enumeration = schema.arrayOfEnumValuesValue(v) + + case "type": + schema.Type = schema.stringOrStringArrayValue(v) + case "allOf": + schema.AllOf = schema.arrayOfSchemasValue(v) + case "anyOf": + schema.AnyOf = schema.arrayOfSchemasValue(v) + case "oneOf": + schema.OneOf = schema.arrayOfSchemasValue(v) + case "not": + schema.Not = NewSchemaFromObject(v) + case "definitions": + schema.Definitions = schema.mapOfSchemasValue(v) + + case "title": + schema.Title = schema.stringValue(v) + case "description": + schema.Description = schema.stringValue(v) + + case "default": + schema.Default = v + + case "format": + schema.Format = schema.stringValue(v) + case "$ref": + schema.Ref = schema.stringValue(v) + default: + fmt.Printf("UNSUPPORTED (%s)\n", k) + } + } + + // insert schema in global map + if schema.ID != nil { + if schemas == nil { + schemas = make(map[string]*Schema, 0) + } + schemas[*(schema.ID)] = schema + } + return schema + + default: + fmt.Printf("schemaValue: unexpected node %+v\n", jsonData) + return nil + } + + return nil +} + +// +// BUILDERS +// The following methods build elements of Schemas from interface{} values. +// Each returns nil if it is unable to build the desired element. +// + +// Gets the string value of an interface{} value if possible. +func (schema *Schema) stringValue(v *yaml.Node) *string { + switch v.Kind { + case yaml.ScalarNode: + return &v.Value + default: + fmt.Printf("stringValue: unexpected node %+v\n", v) + } + return nil +} + +// Gets the numeric value of an interface{} value if possible. 
+func (schema *Schema) numberValue(v *yaml.Node) *SchemaNumber { + number := &SchemaNumber{} + switch v.Kind { + case yaml.ScalarNode: + switch v.Tag { + case "!!float": + v2, _ := strconv.ParseFloat(v.Value, 64) + number.Float = &v2 + return number + case "!!int": + v2, _ := strconv.ParseInt(v.Value, 10, 64) + number.Integer = &v2 + return number + default: + fmt.Printf("stringValue: unexpected node %+v\n", v) + } + default: + fmt.Printf("stringValue: unexpected node %+v\n", v) + } + return nil +} + +// Gets the integer value of an interface{} value if possible. +func (schema *Schema) intValue(v *yaml.Node) *int64 { + switch v.Kind { + case yaml.ScalarNode: + switch v.Tag { + case "!!float": + v2, _ := strconv.ParseFloat(v.Value, 64) + v3 := int64(v2) + return &v3 + case "!!int": + v2, _ := strconv.ParseInt(v.Value, 10, 64) + return &v2 + default: + fmt.Printf("intValue: unexpected node %+v\n", v) + } + default: + fmt.Printf("intValue: unexpected node %+v\n", v) + } + return nil +} + +// Gets the bool value of an interface{} value if possible. +func (schema *Schema) boolValue(v *yaml.Node) *bool { + switch v.Kind { + case yaml.ScalarNode: + switch v.Tag { + case "!!bool": + v2, _ := strconv.ParseBool(v.Value) + return &v2 + default: + fmt.Printf("boolValue: unexpected node %+v\n", v) + } + default: + fmt.Printf("boolValue: unexpected node %+v\n", v) + } + return nil +} + +// Gets a map of Schemas from an interface{} value if possible. +func (schema *Schema) mapOfSchemasValue(v *yaml.Node) *[]*NamedSchema { + switch v.Kind { + case yaml.MappingNode: + m := make([]*NamedSchema, 0) + for i := 0; i < len(v.Content); i += 2 { + k2 := v.Content[i].Value + v2 := v.Content[i+1] + pair := &NamedSchema{Name: k2, Value: NewSchemaFromObject(v2)} + m = append(m, pair) + } + return &m + default: + fmt.Printf("mapOfSchemasValue: unexpected node %+v\n", v) + } + return nil +} + +// Gets an array of Schemas from an interface{} value if possible. +func (schema *Schema) arrayOfSchemasValue(v *yaml.Node) *[]*Schema { + switch v.Kind { + case yaml.SequenceNode: + m := make([]*Schema, 0) + for _, v2 := range v.Content { + switch v2.Kind { + case yaml.MappingNode: + s := NewSchemaFromObject(v2) + m = append(m, s) + default: + fmt.Printf("arrayOfSchemasValue: unexpected node %+v\n", v2) + } + } + return &m + case yaml.MappingNode: + m := make([]*Schema, 0) + s := NewSchemaFromObject(v) + m = append(m, s) + return &m + default: + fmt.Printf("arrayOfSchemasValue: unexpected node %+v\n", v) + } + return nil +} + +// Gets a Schema or an array of Schemas from an interface{} value if possible. +func (schema *Schema) schemaOrSchemaArrayValue(v *yaml.Node) *SchemaOrSchemaArray { + switch v.Kind { + case yaml.SequenceNode: + m := make([]*Schema, 0) + for _, v2 := range v.Content { + switch v2.Kind { + case yaml.MappingNode: + s := NewSchemaFromObject(v2) + m = append(m, s) + default: + fmt.Printf("schemaOrSchemaArrayValue: unexpected node %+v\n", v2) + } + } + return &SchemaOrSchemaArray{SchemaArray: &m} + case yaml.MappingNode: + s := NewSchemaFromObject(v) + return &SchemaOrSchemaArray{Schema: s} + default: + fmt.Printf("schemaOrSchemaArrayValue: unexpected node %+v\n", v) + } + return nil +} + +// Gets an array of strings from an interface{} value if possible. 
+func (schema *Schema) arrayOfStringsValue(v *yaml.Node) *[]string { + switch v.Kind { + case yaml.ScalarNode: + a := []string{v.Value} + return &a + case yaml.SequenceNode: + a := make([]string, 0) + for _, v2 := range v.Content { + switch v2.Kind { + case yaml.ScalarNode: + a = append(a, v2.Value) + default: + fmt.Printf("arrayOfStringsValue: unexpected node %+v\n", v2) + } + } + return &a + default: + fmt.Printf("arrayOfStringsValue: unexpected node %+v\n", v) + } + return nil +} + +// Gets a string or an array of strings from an interface{} value if possible. +func (schema *Schema) stringOrStringArrayValue(v *yaml.Node) *StringOrStringArray { + switch v.Kind { + case yaml.ScalarNode: + s := &StringOrStringArray{} + s.String = &v.Value + return s + case yaml.SequenceNode: + a := make([]string, 0) + for _, v2 := range v.Content { + switch v2.Kind { + case yaml.ScalarNode: + a = append(a, v2.Value) + default: + fmt.Printf("arrayOfStringsValue: unexpected node %+v\n", v2) + } + } + s := &StringOrStringArray{} + s.StringArray = &a + return s + default: + fmt.Printf("arrayOfStringsValue: unexpected node %+v\n", v) + } + return nil +} + +// Gets an array of enum values from an interface{} value if possible. +func (schema *Schema) arrayOfEnumValuesValue(v *yaml.Node) *[]SchemaEnumValue { + a := make([]SchemaEnumValue, 0) + switch v.Kind { + case yaml.SequenceNode: + for _, v2 := range v.Content { + switch v2.Kind { + case yaml.ScalarNode: + switch v2.Tag { + case "!!str": + a = append(a, SchemaEnumValue{String: &v2.Value}) + case "!!bool": + v3, _ := strconv.ParseBool(v2.Value) + a = append(a, SchemaEnumValue{Bool: &v3}) + default: + fmt.Printf("arrayOfEnumValuesValue: unexpected type %s\n", v2.Tag) + } + default: + fmt.Printf("arrayOfEnumValuesValue: unexpected node %+v\n", v2) + } + } + default: + fmt.Printf("arrayOfEnumValuesValue: unexpected node %+v\n", v) + } + return &a +} + +// Gets a map of schemas or string arrays from an interface{} value if possible. +func (schema *Schema) mapOfSchemasOrStringArraysValue(v *yaml.Node) *[]*NamedSchemaOrStringArray { + m := make([]*NamedSchemaOrStringArray, 0) + switch v.Kind { + case yaml.MappingNode: + for i := 0; i < len(v.Content); i += 2 { + k2 := v.Content[i].Value + v2 := v.Content[i+1] + switch v2.Kind { + case yaml.SequenceNode: + a := make([]string, 0) + for _, v3 := range v2.Content { + switch v3.Kind { + case yaml.ScalarNode: + a = append(a, v3.Value) + default: + fmt.Printf("mapOfSchemasOrStringArraysValue: unexpected node %+v\n", v3) + } + } + s := &SchemaOrStringArray{} + s.StringArray = &a + pair := &NamedSchemaOrStringArray{Name: k2, Value: s} + m = append(m, pair) + default: + fmt.Printf("mapOfSchemasOrStringArraysValue: unexpected node %+v\n", v2) + } + } + default: + fmt.Printf("mapOfSchemasOrStringArraysValue: unexpected node %+v\n", v) + } + return &m +} + +// Gets a schema or a boolean value from an interface{} value if possible. 
+func (schema *Schema) schemaOrBooleanValue(v *yaml.Node) *SchemaOrBoolean { + schemaOrBoolean := &SchemaOrBoolean{} + switch v.Kind { + case yaml.ScalarNode: + v2, _ := strconv.ParseBool(v.Value) + schemaOrBoolean.Boolean = &v2 + case yaml.MappingNode: + schemaOrBoolean.Schema = NewSchemaFromObject(v) + default: + fmt.Printf("schemaOrBooleanValue: unexpected node %+v\n", v) + } + return schemaOrBoolean +} diff --git a/vendor/github.com/googleapis/gnostic/jsonschema/schema.json b/vendor/github.com/googleapis/gnostic/jsonschema/schema.json new file mode 100644 index 000000000..85eb502a6 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/jsonschema/schema.json @@ -0,0 +1,150 @@ +{ + "id": "http://json-schema.org/draft-04/schema#", + "$schema": "http://json-schema.org/draft-04/schema#", + "description": "Core schema meta-schema", + "definitions": { + "schemaArray": { + "type": "array", + "minItems": 1, + "items": { "$ref": "#" } + }, + "positiveInteger": { + "type": "integer", + "minimum": 0 + }, + "positiveIntegerDefault0": { + "allOf": [ { "$ref": "#/definitions/positiveInteger" }, { "default": 0 } ] + }, + "simpleTypes": { + "enum": [ "array", "boolean", "integer", "null", "number", "object", "string" ] + }, + "stringArray": { + "type": "array", + "items": { "type": "string" }, + "minItems": 1, + "uniqueItems": true + } + }, + "type": "object", + "properties": { + "id": { + "type": "string", + "format": "uri" + }, + "$schema": { + "type": "string", + "format": "uri" + }, + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "default": {}, + "multipleOf": { + "type": "number", + "minimum": 0, + "exclusiveMinimum": true + }, + "maximum": { + "type": "number" + }, + "exclusiveMaximum": { + "type": "boolean", + "default": false + }, + "minimum": { + "type": "number" + }, + "exclusiveMinimum": { + "type": "boolean", + "default": false + }, + "maxLength": { "$ref": "#/definitions/positiveInteger" }, + "minLength": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "pattern": { + "type": "string", + "format": "regex" + }, + "additionalItems": { + "anyOf": [ + { "type": "boolean" }, + { "$ref": "#" } + ], + "default": {} + }, + "items": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/schemaArray" } + ], + "default": {} + }, + "maxItems": { "$ref": "#/definitions/positiveInteger" }, + "minItems": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "uniqueItems": { + "type": "boolean", + "default": false + }, + "maxProperties": { "$ref": "#/definitions/positiveInteger" }, + "minProperties": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "required": { "$ref": "#/definitions/stringArray" }, + "additionalProperties": { + "anyOf": [ + { "type": "boolean" }, + { "$ref": "#" } + ], + "default": {} + }, + "definitions": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "properties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "patternProperties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "dependencies": { + "type": "object", + "additionalProperties": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/stringArray" } + ] + } + }, + "enum": { + "type": "array", + "minItems": 1, + "uniqueItems": true + }, + "type": { + "anyOf": [ + { "$ref": "#/definitions/simpleTypes" }, + { + "type": "array", + "items": { "$ref": "#/definitions/simpleTypes" }, + "minItems": 1, + "uniqueItems": true + } + ] + }, + "allOf": { 
"$ref": "#/definitions/schemaArray" }, + "anyOf": { "$ref": "#/definitions/schemaArray" }, + "oneOf": { "$ref": "#/definitions/schemaArray" }, + "not": { "$ref": "#" } + }, + "dependencies": { + "exclusiveMaximum": [ "maximum" ], + "exclusiveMinimum": [ "minimum" ] + }, + "default": {} +} diff --git a/vendor/github.com/googleapis/gnostic/jsonschema/writer.go b/vendor/github.com/googleapis/gnostic/jsonschema/writer.go new file mode 100644 index 000000000..340dc5f93 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/jsonschema/writer.go @@ -0,0 +1,369 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jsonschema + +import ( + "fmt" + + "gopkg.in/yaml.v3" +) + +const indentation = " " + +func renderMappingNode(node *yaml.Node, indent string) (result string) { + result = "{\n" + innerIndent := indent + indentation + for i := 0; i < len(node.Content); i += 2 { + // first print the key + key := node.Content[i].Value + result += fmt.Sprintf("%s\"%+v\": ", innerIndent, key) + // then the value + value := node.Content[i+1] + switch value.Kind { + case yaml.ScalarNode: + result += "\"" + value.Value + "\"" + case yaml.MappingNode: + result += renderMappingNode(value, innerIndent) + case yaml.SequenceNode: + result += renderSequenceNode(value, innerIndent) + default: + result += fmt.Sprintf("???MapItem(Key:%+v, Value:%T)", value, value) + } + if i < len(node.Content)-2 { + result += "," + } + result += "\n" + } + + result += indent + "}" + return result +} + +func renderSequenceNode(node *yaml.Node, indent string) (result string) { + result = "[\n" + innerIndent := indent + indentation + for i := 0; i < len(node.Content); i++ { + item := node.Content[i] + switch item.Kind { + case yaml.ScalarNode: + result += innerIndent + "\"" + item.Value + "\"" + case yaml.MappingNode: + result += innerIndent + renderMappingNode(item, innerIndent) + "" + default: + result += innerIndent + fmt.Sprintf("???ArrayItem(%+v)", item) + } + if i < len(node.Content)-1 { + result += "," + } + result += "\n" + } + result += indent + "]" + return result +} + +func renderStringArray(array []string, indent string) (result string) { + result = "[\n" + innerIndent := indent + indentation + for i, item := range array { + result += innerIndent + "\"" + item + "\"" + if i < len(array)-1 { + result += "," + } + result += "\n" + } + result += indent + "]" + return result +} + +// Render renders a yaml.Node as JSON +func Render(node *yaml.Node) string { + if node.Kind == yaml.DocumentNode { + if len(node.Content) == 1 { + return Render(node.Content[0]) + } + } else if node.Kind == yaml.MappingNode { + return renderMappingNode(node, "") + "\n" + } else if node.Kind == yaml.SequenceNode { + return renderSequenceNode(node, "") + "\n" + } + return "" +} + +func (object *SchemaNumber) nodeValue() *yaml.Node { + if object.Integer != nil { + return nodeForInt64(*object.Integer) + } else if object.Float != nil { + return nodeForFloat64(*object.Float) + } else { + 
return nil + } +} + +func (object *SchemaOrBoolean) nodeValue() *yaml.Node { + if object.Schema != nil { + return object.Schema.nodeValue() + } else if object.Boolean != nil { + return nodeForBoolean(*object.Boolean) + } else { + return nil + } +} + +func nodeForStringArray(array []string) *yaml.Node { + content := make([]*yaml.Node, 0) + for _, item := range array { + content = append(content, nodeForString(item)) + } + return nodeForSequence(content) +} + +func nodeForSchemaArray(array []*Schema) *yaml.Node { + content := make([]*yaml.Node, 0) + for _, item := range array { + content = append(content, item.nodeValue()) + } + return nodeForSequence(content) +} + +func (object *StringOrStringArray) nodeValue() *yaml.Node { + if object.String != nil { + return nodeForString(*object.String) + } else if object.StringArray != nil { + return nodeForStringArray(*(object.StringArray)) + } else { + return nil + } +} + +func (object *SchemaOrStringArray) nodeValue() *yaml.Node { + if object.Schema != nil { + return object.Schema.nodeValue() + } else if object.StringArray != nil { + return nodeForStringArray(*(object.StringArray)) + } else { + return nil + } +} + +func (object *SchemaOrSchemaArray) nodeValue() *yaml.Node { + if object.Schema != nil { + return object.Schema.nodeValue() + } else if object.SchemaArray != nil { + return nodeForSchemaArray(*(object.SchemaArray)) + } else { + return nil + } +} + +func (object *SchemaEnumValue) nodeValue() *yaml.Node { + if object.String != nil { + return nodeForString(*object.String) + } else if object.Bool != nil { + return nodeForBoolean(*object.Bool) + } else { + return nil + } +} + +func nodeForNamedSchemaArray(array *[]*NamedSchema) *yaml.Node { + content := make([]*yaml.Node, 0) + for _, pair := range *(array) { + content = appendPair(content, pair.Name, pair.Value.nodeValue()) + } + return nodeForMapping(content) +} + +func nodeForNamedSchemaOrStringArray(array *[]*NamedSchemaOrStringArray) *yaml.Node { + content := make([]*yaml.Node, 0) + for _, pair := range *(array) { + content = appendPair(content, pair.Name, pair.Value.nodeValue()) + } + return nodeForMapping(content) +} + +func nodeForSchemaEnumArray(array *[]SchemaEnumValue) *yaml.Node { + content := make([]*yaml.Node, 0) + for _, item := range *array { + content = append(content, item.nodeValue()) + } + return nodeForSequence(content) +} + +func nodeForMapping(content []*yaml.Node) *yaml.Node { + return &yaml.Node{ + Kind: yaml.MappingNode, + Content: content, + } +} + +func nodeForSequence(content []*yaml.Node) *yaml.Node { + return &yaml.Node{ + Kind: yaml.SequenceNode, + Content: content, + } +} + +func nodeForString(value string) *yaml.Node { + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: "!!str", + Value: value, + } +} + +func nodeForBoolean(value bool) *yaml.Node { + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: "!!bool", + Value: fmt.Sprintf("%t", value), + } +} + +func nodeForInt64(value int64) *yaml.Node { + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: "!!int", + Value: fmt.Sprintf("%d", value), + } +} + +func nodeForFloat64(value float64) *yaml.Node { + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: "!!float", + Value: fmt.Sprintf("%f", value), + } +} + +func appendPair(nodes []*yaml.Node, name string, value *yaml.Node) []*yaml.Node { + nodes = append(nodes, nodeForString(name)) + nodes = append(nodes, value) + return nodes +} + +func (schema *Schema) nodeValue() *yaml.Node { + n := &yaml.Node{Kind: yaml.MappingNode} + content := make([]*yaml.Node, 0) + if 
schema.Title != nil { + content = appendPair(content, "title", nodeForString(*schema.Title)) + } + if schema.ID != nil { + content = appendPair(content, "id", nodeForString(*schema.ID)) + } + if schema.Schema != nil { + content = appendPair(content, "$schema", nodeForString(*schema.Schema)) + } + if schema.Type != nil { + content = appendPair(content, "type", schema.Type.nodeValue()) + } + if schema.Items != nil { + content = appendPair(content, "items", schema.Items.nodeValue()) + } + if schema.Description != nil { + content = appendPair(content, "description", nodeForString(*schema.Description)) + } + if schema.Required != nil { + content = appendPair(content, "required", nodeForStringArray(*schema.Required)) + } + if schema.AdditionalProperties != nil { + content = appendPair(content, "additionalProperties", schema.AdditionalProperties.nodeValue()) + } + if schema.PatternProperties != nil { + content = appendPair(content, "patternProperties", nodeForNamedSchemaArray(schema.PatternProperties)) + } + if schema.Properties != nil { + content = appendPair(content, "properties", nodeForNamedSchemaArray(schema.Properties)) + } + if schema.Dependencies != nil { + content = appendPair(content, "dependencies", nodeForNamedSchemaOrStringArray(schema.Dependencies)) + } + if schema.Ref != nil { + content = appendPair(content, "$ref", nodeForString(*schema.Ref)) + } + if schema.MultipleOf != nil { + content = appendPair(content, "multipleOf", schema.MultipleOf.nodeValue()) + } + if schema.Maximum != nil { + content = appendPair(content, "maximum", schema.Maximum.nodeValue()) + } + if schema.ExclusiveMaximum != nil { + content = appendPair(content, "exclusiveMaximum", nodeForBoolean(*schema.ExclusiveMaximum)) + } + if schema.Minimum != nil { + content = appendPair(content, "minimum", schema.Minimum.nodeValue()) + } + if schema.ExclusiveMinimum != nil { + content = appendPair(content, "exclusiveMinimum", nodeForBoolean(*schema.ExclusiveMinimum)) + } + if schema.MaxLength != nil { + content = appendPair(content, "maxLength", nodeForInt64(*schema.MaxLength)) + } + if schema.MinLength != nil { + content = appendPair(content, "minLength", nodeForInt64(*schema.MinLength)) + } + if schema.Pattern != nil { + content = appendPair(content, "pattern", nodeForString(*schema.Pattern)) + } + if schema.AdditionalItems != nil { + content = appendPair(content, "additionalItems", schema.AdditionalItems.nodeValue()) + } + if schema.MaxItems != nil { + content = appendPair(content, "maxItems", nodeForInt64(*schema.MaxItems)) + } + if schema.MinItems != nil { + content = appendPair(content, "minItems", nodeForInt64(*schema.MinItems)) + } + if schema.UniqueItems != nil { + content = appendPair(content, "uniqueItems", nodeForBoolean(*schema.UniqueItems)) + } + if schema.MaxProperties != nil { + content = appendPair(content, "maxProperties", nodeForInt64(*schema.MaxProperties)) + } + if schema.MinProperties != nil { + content = appendPair(content, "minProperties", nodeForInt64(*schema.MinProperties)) + } + if schema.Enumeration != nil { + content = appendPair(content, "enum", nodeForSchemaEnumArray(schema.Enumeration)) + } + if schema.AllOf != nil { + content = appendPair(content, "allOf", nodeForSchemaArray(*schema.AllOf)) + } + if schema.AnyOf != nil { + content = appendPair(content, "anyOf", nodeForSchemaArray(*schema.AnyOf)) + } + if schema.OneOf != nil { + content = appendPair(content, "oneOf", nodeForSchemaArray(*schema.OneOf)) + } + if schema.Not != nil { + content = appendPair(content, "not", schema.Not.nodeValue()) 
+ } + if schema.Definitions != nil { + content = appendPair(content, "definitions", nodeForNamedSchemaArray(schema.Definitions)) + } + if schema.Default != nil { + // m = append(m, yaml.MapItem{Key: "default", Value: *schema.Default}) + } + if schema.Format != nil { + content = appendPair(content, "format", nodeForString(*schema.Format)) + } + n.Content = content + return n +} + +// JSONString returns a json representation of a schema. +func (schema *Schema) JSONString() string { + node := schema.nodeValue() + return Render(node) +} diff --git a/vendor/github.com/hashicorp/golang-lru/lru.go b/vendor/github.com/hashicorp/golang-lru/lru.go index 052a38b4c..4e5e9d8fd 100644 --- a/vendor/github.com/hashicorp/golang-lru/lru.go +++ b/vendor/github.com/hashicorp/golang-lru/lru.go @@ -37,7 +37,7 @@ func (c *Cache) Purge() { c.lock.Unlock() } -// Add adds a value to the cache. Returns true if an eviction occurred. +// Add adds a value to the cache. Returns true if an eviction occurred. func (c *Cache) Add(key, value interface{}) (evicted bool) { c.lock.Lock() evicted = c.lru.Add(key, value) @@ -71,8 +71,8 @@ func (c *Cache) Peek(key interface{}) (value interface{}, ok bool) { return value, ok } -// ContainsOrAdd checks if a key is in the cache without updating the -// recent-ness or deleting it for being stale, and if not, adds the value. +// ContainsOrAdd checks if a key is in the cache without updating the +// recent-ness or deleting it for being stale, and if not, adds the value. // Returns whether found and whether an eviction occurred. func (c *Cache) ContainsOrAdd(key, value interface{}) (ok, evicted bool) { c.lock.Lock() @@ -85,6 +85,22 @@ func (c *Cache) ContainsOrAdd(key, value interface{}) (ok, evicted bool) { return false, evicted } +// PeekOrAdd checks if a key is in the cache without updating the +// recent-ness or deleting it for being stale, and if not, adds the value. +// Returns whether found and whether an eviction occurred. +func (c *Cache) PeekOrAdd(key, value interface{}) (previous interface{}, ok, evicted bool) { + c.lock.Lock() + defer c.lock.Unlock() + + previous, ok = c.lru.Peek(key) + if ok { + return previous, true, false + } + + evicted = c.lru.Add(key, value) + return nil, false, evicted +} + // Remove removes the provided key from the cache. 
func (c *Cache) Remove(key interface{}) (present bool) { c.lock.Lock() diff --git a/vendor/github.com/hpcloud/tail/.gitignore b/vendor/github.com/hpcloud/tail/.gitignore deleted file mode 100644 index 6d9953c3c..000000000 --- a/vendor/github.com/hpcloud/tail/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -.test -.go - diff --git a/vendor/github.com/hpcloud/tail/CHANGES.md b/vendor/github.com/hpcloud/tail/CHANGES.md deleted file mode 100644 index 422790c07..000000000 --- a/vendor/github.com/hpcloud/tail/CHANGES.md +++ /dev/null @@ -1,63 +0,0 @@ -# API v1 (gopkg.in/hpcloud/tail.v1) - -## April, 2016 - -* Migrated to godep, as depman is not longer supported -* Introduced golang vendoring feature -* Fixed issue [#57](https://github.com/hpcloud/tail/issues/57) related to reopen deleted file - -## July, 2015 - -* Fix inotify watcher leak; remove `Cleanup` (#51) - -# API v0 (gopkg.in/hpcloud/tail.v0) - -## June, 2015 - -* Don't return partial lines (PR #40) -* Use stable version of fsnotify (#46) - -## July, 2014 - -* Fix tail for Windows (PR #36) - -## May, 2014 - -* Improved rate limiting using leaky bucket (PR #29) -* Fix odd line splitting (PR #30) - -## Apr, 2014 - -* LimitRate now discards read buffer (PR #28) -* allow reading of longer lines if MaxLineSize is unset (PR #24) -* updated deps.json to latest fsnotify (441bbc86b1) - -## Feb, 2014 - -* added `Config.Logger` to suppress library logging - -## Nov, 2013 - -* add Cleanup to remove leaky inotify watches (PR #20) - -## Aug, 2013 - -* redesigned Location field (PR #12) -* add tail.Tell (PR #14) - -## July, 2013 - -* Rate limiting (PR #10) - -## May, 2013 - -* Detect file deletions/renames in polling file watcher (PR #1) -* Detect file truncation -* Fix potential race condition when reopening the file (issue 5) -* Fix potential blocking of `tail.Stop` (issue 4) -* Fix uncleaned up ChangeEvents goroutines after calling tail.Stop -* Support Follow=false - -## Feb, 2013 - -* Initial open source release diff --git a/vendor/github.com/hpcloud/tail/Dockerfile b/vendor/github.com/hpcloud/tail/Dockerfile deleted file mode 100644 index cd297b940..000000000 --- a/vendor/github.com/hpcloud/tail/Dockerfile +++ /dev/null @@ -1,19 +0,0 @@ -FROM golang - -RUN mkdir -p $GOPATH/src/github.com/hpcloud/tail/ -ADD . $GOPATH/src/github.com/hpcloud/tail/ - -# expecting to fetch dependencies successfully. -RUN go get -v github.com/hpcloud/tail - -# expecting to run the test successfully. -RUN go test -v github.com/hpcloud/tail - -# expecting to install successfully -RUN go install -v github.com/hpcloud/tail -RUN go install -v github.com/hpcloud/tail/cmd/gotail - -RUN $GOPATH/bin/gotail -h || true - -ENV PATH $GOPATH/bin:$PATH -CMD ["gotail"] diff --git a/vendor/github.com/hpcloud/tail/Makefile b/vendor/github.com/hpcloud/tail/Makefile deleted file mode 100644 index 6591b24fc..000000000 --- a/vendor/github.com/hpcloud/tail/Makefile +++ /dev/null @@ -1,11 +0,0 @@ -default: test - -test: *.go - go test -v -race ./... - -fmt: - gofmt -w . - -# Run the test in an isolated environment. -fulltest: - docker build -t hpcloud/tail . 
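The hashicorp/golang-lru bump above vendors the new Cache.PeekOrAdd method next to the existing ContainsOrAdd. A minimal sketch of the difference, assuming a tiny illustrative cache (the size, keys, and values below are invented for the example and are not part of the patch):

package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

func main() {
	// A two-entry cache; the size is arbitrary for the example.
	cache, err := lru.New(2)
	if err != nil {
		panic(err)
	}
	cache.Add("a", 1)

	// ContainsOrAdd reports presence but never returns the stored value.
	ok, evicted := cache.ContainsOrAdd("a", 99)
	fmt.Println(ok, evicted) // true false

	// PeekOrAdd additionally returns the previous value when the key is present,
	// without updating recency (it peeks rather than gets).
	prev, ok, evicted := cache.PeekOrAdd("a", 99)
	fmt.Println(prev, ok, evicted) // 1 true false

	// For a missing key the value is added and previous is nil.
	prev, ok, evicted = cache.PeekOrAdd("b", 2)
	fmt.Println(prev, ok, evicted) // <nil> false false
}

PeekOrAdd is useful when the caller needs the previously stored value without promoting the entry in the LRU order, which ContainsOrAdd cannot provide.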
diff --git a/vendor/github.com/hpcloud/tail/README.md b/vendor/github.com/hpcloud/tail/README.md deleted file mode 100644 index fb7fbc26c..000000000 --- a/vendor/github.com/hpcloud/tail/README.md +++ /dev/null @@ -1,28 +0,0 @@ -[![Build Status](https://travis-ci.org/hpcloud/tail.svg)](https://travis-ci.org/hpcloud/tail) -[![Build status](https://ci.appveyor.com/api/projects/status/kohpsf3rvhjhrox6?svg=true)](https://ci.appveyor.com/project/HelionCloudFoundry/tail) - -# Go package for tail-ing files - -A Go package striving to emulate the features of the BSD `tail` program. - -```Go -t, err := tail.TailFile("/var/log/nginx.log", tail.Config{Follow: true}) -for line := range t.Lines { - fmt.Println(line.Text) -} -``` - -See [API documentation](http://godoc.org/github.com/hpcloud/tail). - -## Log rotation - -Tail comes with full support for truncation/move detection as it is -designed to work with log rotation tools. - -## Installing - - go get github.com/hpcloud/tail/... - -## Windows support - -This package [needs assistance](https://github.com/hpcloud/tail/labels/Windows) for full Windows support. diff --git a/vendor/github.com/imdario/mergo/.deepsource.toml b/vendor/github.com/imdario/mergo/.deepsource.toml new file mode 100644 index 000000000..8a0681af8 --- /dev/null +++ b/vendor/github.com/imdario/mergo/.deepsource.toml @@ -0,0 +1,12 @@ +version = 1 + +test_patterns = [ + "*_test.go" +] + +[[analyzers]] +name = "go" +enabled = true + + [analyzers.meta] + import_path = "github.com/imdario/mergo" \ No newline at end of file diff --git a/vendor/github.com/imdario/mergo/README.md b/vendor/github.com/imdario/mergo/README.md index 02fc81e06..075b4d78e 100644 --- a/vendor/github.com/imdario/mergo/README.md +++ b/vendor/github.com/imdario/mergo/README.md @@ -2,6 +2,8 @@ A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. +Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection). + Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region of Marche. ## Status @@ -9,36 +11,37 @@ Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc](https://github.com/imdario/mergo#mergo-in-the-wild). 
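The mergo merge semantics added to the README above (only exported, zero-value fields of the destination are filled from the source; unexported fields are skipped) are easy to misread. A minimal sketch, using an illustrative struct that is not taken from the mergo docs:

package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

// config is an illustrative type: one exported field, one unexported field.
type config struct {
	Host string
	port int // unexported: left untouched by the merge
}

func main() {
	dst := config{} // zero values only
	src := config{Host: "localhost", port: 8080}

	if err := mergo.Merge(&dst, src); err != nil {
		panic(err)
	}

	// Host is filled because it was the zero value in dst;
	// port stays 0 because unexported fields are skipped.
	fmt.Printf("%+v\n", dst) // {Host:localhost port:0}
}

To overwrite non-zero destination values instead, the WithOverride option described further down in the vendored docs applies.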
[![GoDoc][3]][4] -[![GoCard][5]][6] +[![GitHub release][5]][6] +[![GoCard][7]][8] [![Build Status][1]][2] -[![Coverage Status][7]][8] -[![Sourcegraph][9]][10] +[![Coverage Status][9]][10] +[![Sourcegraph][11]][12] [![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield) [1]: https://travis-ci.org/imdario/mergo.png [2]: https://travis-ci.org/imdario/mergo [3]: https://godoc.org/github.com/imdario/mergo?status.svg [4]: https://godoc.org/github.com/imdario/mergo -[5]: https://goreportcard.com/badge/imdario/mergo -[6]: https://goreportcard.com/report/github.com/imdario/mergo -[7]: https://coveralls.io/repos/github/imdario/mergo/badge.svg?branch=master -[8]: https://coveralls.io/github/imdario/mergo?branch=master -[9]: https://sourcegraph.com/github.com/imdario/mergo/-/badge.svg -[10]: https://sourcegraph.com/github.com/imdario/mergo?badge - -### Latest release - -[Release v0.3.7](https://github.com/imdario/mergo/releases/tag/v0.3.7). +[5]: https://img.shields.io/github/release/imdario/mergo.svg +[6]: https://github.com/imdario/mergo/releases +[7]: https://goreportcard.com/badge/imdario/mergo +[8]: https://goreportcard.com/report/github.com/imdario/mergo +[9]: https://coveralls.io/repos/github/imdario/mergo/badge.svg?branch=master +[10]: https://coveralls.io/github/imdario/mergo?branch=master +[11]: https://sourcegraph.com/github.com/imdario/mergo/-/badge.svg +[12]: https://sourcegraph.com/github.com/imdario/mergo?badge ### Important note -Please keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2) Mergo changed `Merge()`and `Map()` signatures to support [transformers](#transformers). An optional/variadic argument has been added, so it won't break existing code. +Please keep in mind that a problematic PR broke [0.3.9](//github.com/imdario/mergo/releases/tag/0.3.9). I reverted it in [0.3.10](//github.com/imdario/mergo/releases/tag/0.3.10), and I consider it stable but not bug-free. Also, this version adds suppot for go modules. -If you were using Mergo **before** April 6th 2015, please check your project works as intended after updating your local copy with ```go get -u github.com/imdario/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause (I hope it won't!) in existing projects after the change (release 0.2.0). +Keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2), Mergo changed `Merge()`and `Map()` signatures to support [transformers](#transformers). I added an optional/variadic argument so that it won't break the existing code. + +If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with ```go get -u github.com/imdario/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0). ### Donations -If Mergo is useful to you, consider buying me a coffee, a beer or making a monthly donation so I can keep building great free software. :heart_eyes: +If Mergo is useful to you, consider buying me a coffee, a beer, or making a monthly donation to allow me to keep building great free software. 
:heart_eyes: Buy Me a Coffee at ko-fi.com [![Beerpay](https://beerpay.io/imdario/mergo/badge.svg)](https://beerpay.io/imdario/mergo) @@ -87,8 +90,9 @@ If Mergo is useful to you, consider buying me a coffee, a beer or making a month - [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server) - [jnuthong/item_search](https://github.com/jnuthong/item_search) - [bukalapak/snowboard](https://github.com/bukalapak/snowboard) +- [janoszen/containerssh](https://github.com/janoszen/containerssh) -## Installation +## Install go get github.com/imdario/mergo @@ -99,7 +103,7 @@ If Mergo is useful to you, consider buying me a coffee, a beer or making a month ## Usage -You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as [they are not considered zero values](https://golang.org/ref/spec#The_zero_value) either. Also maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection). +You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as [they are zero values](https://golang.org/ref/spec#The_zero_value) too. Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection). ```go if err := mergo.Merge(&dst, src); err != nil { @@ -125,9 +129,7 @@ if err := mergo.Map(&dst, srcMap); err != nil { Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as `map[string]interface{}`. They will be just assigned as values. -More information and examples in [godoc documentation](http://godoc.org/github.com/imdario/mergo). - -### Nice example +Here is a nice example: ```go package main @@ -175,10 +177,10 @@ import ( "time" ) -type timeTransfomer struct { +type timeTransformer struct { } -func (t timeTransfomer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error { +func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error { if typ == reflect.TypeOf(time.Time{}) { return func(dst, src reflect.Value) error { if dst.CanSet() { @@ -202,7 +204,7 @@ type Snapshot struct { func main() { src := Snapshot{time.Now()} dest := Snapshot{} - mergo.Merge(&dest, src, mergo.WithTransformers(timeTransfomer{})) + mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{})) fmt.Println(dest) // Will print // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 } diff --git a/vendor/github.com/imdario/mergo/doc.go b/vendor/github.com/imdario/mergo/doc.go index 6e9aa7baf..fcd985f99 100644 --- a/vendor/github.com/imdario/mergo/doc.go +++ b/vendor/github.com/imdario/mergo/doc.go @@ -4,41 +4,140 @@ // license that can be found in the LICENSE file. /* -Package mergo merges same-type structs and maps by setting default values in zero-value fields. +A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. -Mergo won't merge unexported (private) fields but will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection). 
+Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection). + +Status + +It is ready for production use. It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc. + +Important note + +Please keep in mind that a problematic PR broke 0.3.9. We reverted it in 0.3.10. We consider 0.3.10 as stable but not bug-free. . Also, this version adds suppot for go modules. + +Keep in mind that in 0.3.2, Mergo changed Merge() and Map() signatures to support transformers. We added an optional/variadic argument so that it won't break the existing code. + +If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with go get -u github.com/imdario/mergo. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0). + +Install + +Do your usual installation procedure: + + go get github.com/imdario/mergo + + // use in your .go code + import ( + "github.com/imdario/mergo" + ) Usage -From my own work-in-progress project: +You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as they are zero values too. Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection). + + if err := mergo.Merge(&dst, src); err != nil { + // ... + } + +Also, you can merge overwriting values using the transformer WithOverride. + + if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil { + // ... + } + +Additionally, you can map a map[string]interface{} to a struct (and otherwise, from struct to map), following the same restrictions as in Merge(). Keys are capitalized to find each corresponding exported field. + + if err := mergo.Map(&dst, srcMap); err != nil { + // ... + } + +Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as map[string]interface{}. They will be just assigned as values. + +Here is a nice example: + + package main + + import ( + "fmt" + "github.com/imdario/mergo" + ) - type networkConfig struct { - Protocol string - Address string - ServerType string `json: "server_type"` - Port uint16 + type Foo struct { + A string + B int64 } - type FssnConfig struct { - Network networkConfig + func main() { + src := Foo{ + A: "one", + B: 2, + } + dest := Foo{ + A: "two", + } + mergo.Merge(&dest, src) + fmt.Println(dest) + // Will print + // {two 2} } - var fssnDefault = FssnConfig { - networkConfig { - "tcp", - "127.0.0.1", - "http", - 31560, - }, +Transformers + +Transformers allow to merge specific types differently than in the default behavior. In other words, now you can customize how some types are merged. For example, time.Time is a struct; it doesn't have zero value but IsZero can return true because it has fields with zero value. How can we merge a non-zero time.Time? + + package main + + import ( + "fmt" + "github.com/imdario/mergo" + "reflect" + "time" + ) + + type timeTransformer struct { } - // Inside a function [...] 
+ func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error { + if typ == reflect.TypeOf(time.Time{}) { + return func(dst, src reflect.Value) error { + if dst.CanSet() { + isZero := dst.MethodByName("IsZero") + result := isZero.Call([]reflect.Value{}) + if result[0].Bool() { + dst.Set(src) + } + } + return nil + } + } + return nil + } + + type Snapshot struct { + Time time.Time + // ... + } - if err := mergo.Merge(&config, fssnDefault); err != nil { - log.Fatal(err) + func main() { + src := Snapshot{time.Now()} + dest := Snapshot{} + mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{})) + fmt.Println(dest) + // Will print + // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 } } - // More code [...] +Contact me + +If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): https://twitter.com/im_dario + +About + +Written by Dario Castañé: https://da.rio.hn + +License + +BSD 3-Clause license, as Go language. */ package mergo diff --git a/vendor/github.com/imdario/mergo/go.mod b/vendor/github.com/imdario/mergo/go.mod new file mode 100644 index 000000000..3d689d93e --- /dev/null +++ b/vendor/github.com/imdario/mergo/go.mod @@ -0,0 +1,5 @@ +module github.com/imdario/mergo + +go 1.13 + +require gopkg.in/yaml.v2 v2.3.0 diff --git a/vendor/github.com/imdario/mergo/go.sum b/vendor/github.com/imdario/mergo/go.sum new file mode 100644 index 000000000..168980da5 --- /dev/null +++ b/vendor/github.com/imdario/mergo/go.sum @@ -0,0 +1,4 @@ +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/imdario/mergo/map.go b/vendor/github.com/imdario/mergo/map.go index 3f5afa83a..a13a7ee46 100644 --- a/vendor/github.com/imdario/mergo/map.go +++ b/vendor/github.com/imdario/mergo/map.go @@ -141,6 +141,9 @@ func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { } func _map(dst, src interface{}, opts ...func(*Config)) error { + if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr { + return ErrNonPointerAgument + } var ( vDst, vSrc reflect.Value err error diff --git a/vendor/github.com/imdario/mergo/merge.go b/vendor/github.com/imdario/mergo/merge.go index 3fb6c64d0..11a8c156d 100644 --- a/vendor/github.com/imdario/mergo/merge.go +++ b/vendor/github.com/imdario/mergo/merge.go @@ -13,18 +13,30 @@ import ( "reflect" ) -func hasExportedField(dst reflect.Value) (exported bool) { +func hasMergeableFields(dst reflect.Value) (exported bool) { for i, n := 0, dst.NumField(); i < n; i++ { field := dst.Type().Field(i) if field.Anonymous && dst.Field(i).Kind() == reflect.Struct { - exported = exported || hasExportedField(dst.Field(i)) - } else { + exported = exported || hasMergeableFields(dst.Field(i)) + } else if isExportedComponent(&field) { exported = exported || len(field.PkgPath) == 0 } } return } +func isExportedComponent(field *reflect.StructField) bool { + pkgPath := field.PkgPath + if len(pkgPath) > 0 { + return false + } + c := field.Name[0] + if 'a' <= c && c <= 'z' || c == '_' { + return false + } + return true +} + type Config struct { Overwrite bool AppendSlice bool @@ -32,6 +44,8 @@ type Config struct { Transformers Transformers 
overwriteWithEmptyValue bool overwriteSliceWithEmptyValue bool + sliceDeepCopy bool + debug bool } type Transformers interface { @@ -46,7 +60,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co typeCheck := config.TypeCheck overwriteWithEmptySrc := config.overwriteWithEmptyValue overwriteSliceWithEmptySrc := config.overwriteSliceWithEmptyValue - config.overwriteWithEmptyValue = false + sliceDeepCopy := config.sliceDeepCopy if !src.IsValid() { return @@ -74,14 +88,14 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co switch dst.Kind() { case reflect.Struct: - if hasExportedField(dst) { + if hasMergeableFields(dst) { for i, n := 0, dst.NumField(); i < n; i++ { if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1, config); err != nil { return } } } else { - if dst.CanSet() && (!isEmptyValue(src) || overwriteWithEmptySrc) && (overwrite || isEmptyValue(dst)) { + if (isReflectNil(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) { dst.Set(src) } } @@ -89,6 +103,14 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co if dst.IsNil() && !src.IsNil() { dst.Set(reflect.MakeMap(dst.Type())) } + + if src.Kind() != reflect.Map { + if overwrite { + dst.Set(src) + } + return + } + for _, key := range src.MapKeys() { srcElement := src.MapIndex(key) if !srcElement.IsValid() { @@ -98,6 +120,9 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co switch srcElement.Kind() { case reflect.Chan, reflect.Func, reflect.Map, reflect.Interface, reflect.Slice: if srcElement.IsNil() { + if overwrite { + dst.SetMapIndex(key, srcElement) + } continue } fallthrough @@ -132,7 +157,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co dstSlice = reflect.ValueOf(dstElement.Interface()) } - if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice { + if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice && !sliceDeepCopy { if typeCheck && srcSlice.Type() != dstSlice.Type() { return fmt.Errorf("cannot override two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type()) } @@ -142,6 +167,24 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co return fmt.Errorf("cannot append two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type()) } dstSlice = reflect.AppendSlice(dstSlice, srcSlice) + } else if sliceDeepCopy { + i := 0 + for ; i < srcSlice.Len() && i < dstSlice.Len(); i++ { + srcElement := srcSlice.Index(i) + dstElement := dstSlice.Index(i) + + if srcElement.CanInterface() { + srcElement = reflect.ValueOf(srcElement.Interface()) + } + if dstElement.CanInterface() { + dstElement = reflect.ValueOf(dstElement.Interface()) + } + + if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { + return + } + } + } dst.SetMapIndex(key, dstSlice) } @@ -161,26 +204,35 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co if !dst.CanSet() { break } - if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice { + if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice && !sliceDeepCopy { dst.Set(src) } else if 
config.AppendSlice { if src.Type() != dst.Type() { return fmt.Errorf("cannot append two slice with different type (%s, %s)", src.Type(), dst.Type()) } dst.Set(reflect.AppendSlice(dst, src)) + } else if sliceDeepCopy { + for i := 0; i < src.Len() && i < dst.Len(); i++ { + srcElement := src.Index(i) + dstElement := dst.Index(i) + if srcElement.CanInterface() { + srcElement = reflect.ValueOf(srcElement.Interface()) + } + if dstElement.CanInterface() { + dstElement = reflect.ValueOf(dstElement.Interface()) + } + + if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { + return + } + } } case reflect.Ptr: fallthrough case reflect.Interface: - if src.IsNil() { - break - } - - if dst.Kind() != reflect.Ptr && src.Type().AssignableTo(dst.Type()) { - if dst.IsNil() || overwrite { - if dst.CanSet() && (overwrite || isEmptyValue(dst)) { - dst.Set(src) - } + if isReflectNil(src) { + if overwriteWithEmptySrc && dst.CanSet() && src.Type().AssignableTo(dst.Type()) { + dst.Set(src) } break } @@ -203,16 +255,33 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co } break } + if dst.IsNil() || overwrite { if dst.CanSet() && (overwrite || isEmptyValue(dst)) { dst.Set(src) } - } else if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { - return + break + } + + if dst.Elem().Kind() == src.Elem().Kind() { + if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { + return + } + break } default: - if dst.CanSet() && (!isEmptyValue(src) || overwriteWithEmptySrc) && (overwrite || isEmptyValue(dst)) { - dst.Set(src) + mustSet := (isEmptyValue(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) + v := fmt.Sprintf("%v", src) + if v == "TestIssue106" { + fmt.Println(mustSet) + fmt.Println(dst.CanSet()) + } + if mustSet { + if dst.CanSet() { + dst.Set(src) + } else { + dst = src + } } } @@ -246,7 +315,13 @@ func WithOverride(config *Config) { config.Overwrite = true } -// WithOverride will make merge override empty dst slice with empty src slice. +// WithOverwriteWithEmptyValue will make merge override non empty dst attributes with empty src attributes values. +func WithOverwriteWithEmptyValue(config *Config) { + config.Overwrite = true + config.overwriteWithEmptyValue = true +} + +// WithOverrideEmptySlice will make merge override empty dst slice with empty src slice. func WithOverrideEmptySlice(config *Config) { config.overwriteSliceWithEmptyValue = true } @@ -261,7 +336,16 @@ func WithTypeCheck(config *Config) { config.TypeCheck = true } +// WithSliceDeepCopy will merge slice element one by one with Overwrite flag. +func WithSliceDeepCopy(config *Config) { + config.sliceDeepCopy = true + config.Overwrite = true +} + func merge(dst, src interface{}, opts ...func(*Config)) error { + if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr { + return ErrNonPointerAgument + } var ( vDst, vSrc reflect.Value err error @@ -281,3 +365,16 @@ func merge(dst, src interface{}, opts ...func(*Config)) error { } return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config) } + +// IsReflectNil is the reflect value provided nil +func isReflectNil(v reflect.Value) bool { + k := v.Kind() + switch k { + case reflect.Interface, reflect.Slice, reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr: + // Both interface and slice are nil if first word is 0. + // Both are always bigger than a word; assume flagIndir. 
+ return v.IsNil() + default: + return false + } +} diff --git a/vendor/github.com/imdario/mergo/mergo.go b/vendor/github.com/imdario/mergo/mergo.go index a82fea2fd..3cc926c7f 100644 --- a/vendor/github.com/imdario/mergo/mergo.go +++ b/vendor/github.com/imdario/mergo/mergo.go @@ -20,6 +20,7 @@ var ( ErrNotSupported = errors.New("only structs and maps are supported") ErrExpectedMapAsDestination = errors.New("dst was expected to be a map") ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct") + ErrNonPointerAgument = errors.New("dst must be a pointer") ) // During deepMerge, must keep track of checks that are @@ -75,23 +76,3 @@ func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) { } return } - -// Traverses recursively both values, assigning src's fields values to dst. -// The map argument tracks comparisons that have already been seen, which allows -// short circuiting on recursive types. -func deeper(dst, src reflect.Value, visited map[uintptr]*visit, depth int) (err error) { - if dst.CanAddr() { - addr := dst.UnsafeAddr() - h := 17 * addr - seen := visited[h] - typ := dst.Type() - for p := seen; p != nil; p = p.next { - if p.ptr == addr && p.typ == typ { - return nil - } - } - // Remember, remember... - visited[h] = &visit{addr, typ, seen} - } - return // TODO refactor -} diff --git a/vendor/github.com/json-iterator/go/README.md b/vendor/github.com/json-iterator/go/README.md index 50d56ffbf..52b111d5f 100644 --- a/vendor/github.com/json-iterator/go/README.md +++ b/vendor/github.com/json-iterator/go/README.md @@ -1,5 +1,5 @@ [![Sourcegraph](https://sourcegraph.com/github.com/json-iterator/go/-/badge.svg)](https://sourcegraph.com/github.com/json-iterator/go?badge) -[![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/json-iterator/go) +[![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://pkg.go.dev/github.com/json-iterator/go) [![Build Status](https://travis-ci.org/json-iterator/go.svg?branch=master)](https://travis-ci.org/json-iterator/go) [![codecov](https://codecov.io/gh/json-iterator/go/branch/master/graph/badge.svg)](https://codecov.io/gh/json-iterator/go) [![rcard](https://goreportcard.com/badge/github.com/json-iterator/go)](https://goreportcard.com/report/github.com/json-iterator/go) @@ -18,16 +18,16 @@ Source code: https://github.com/json-iterator/go-benchmark/blob/master/src/githu Raw Result (easyjson requires static code generation) -| | ns/op | allocation bytes | allocation times | -| --- | --- | --- | --- | -| std decode | 35510 ns/op | 1960 B/op | 99 allocs/op | -| easyjson decode | 8499 ns/op | 160 B/op | 4 allocs/op | -| jsoniter decode | 5623 ns/op | 160 B/op | 3 allocs/op | -| std encode | 2213 ns/op | 712 B/op | 5 allocs/op | -| easyjson encode | 883 ns/op | 576 B/op | 3 allocs/op | -| jsoniter encode | 837 ns/op | 384 B/op | 4 allocs/op | +| | ns/op | allocation bytes | allocation times | +| --------------- | ----------- | ---------------- | ---------------- | +| std decode | 35510 ns/op | 1960 B/op | 99 allocs/op | +| easyjson decode | 8499 ns/op | 160 B/op | 4 allocs/op | +| jsoniter decode | 5623 ns/op | 160 B/op | 3 allocs/op | +| std encode | 2213 ns/op | 712 B/op | 5 allocs/op | +| easyjson encode | 883 ns/op | 576 B/op | 3 allocs/op | +| jsoniter encode | 837 ns/op | 384 B/op | 4 allocs/op | -Always benchmark with your own workload. +Always benchmark with your own workload. 
The result depends heavily on the data input. # Usage @@ -41,10 +41,10 @@ import "encoding/json" json.Marshal(&data) ``` -with +with ```go -import "github.com/json-iterator/go" +import jsoniter "github.com/json-iterator/go" var json = jsoniter.ConfigCompatibleWithStandardLibrary json.Marshal(&data) @@ -60,7 +60,7 @@ json.Unmarshal(input, &data) with ```go -import "github.com/json-iterator/go" +import jsoniter "github.com/json-iterator/go" var json = jsoniter.ConfigCompatibleWithStandardLibrary json.Unmarshal(input, &data) @@ -78,10 +78,10 @@ go get github.com/json-iterator/go Contributors -* [thockin](https://github.com/thockin) -* [mattn](https://github.com/mattn) -* [cch123](https://github.com/cch123) -* [Oleg Shaldybin](https://github.com/olegshaldybin) -* [Jason Toffaletti](https://github.com/toffaletti) +- [thockin](https://github.com/thockin) +- [mattn](https://github.com/mattn) +- [cch123](https://github.com/cch123) +- [Oleg Shaldybin](https://github.com/olegshaldybin) +- [Jason Toffaletti](https://github.com/toffaletti) Report issue or pull request, or email taowen@gmail.com, or [![Gitter chat](https://badges.gitter.im/gitterHQ/gitter.png)](https://gitter.im/json-iterator/Lobby) diff --git a/vendor/github.com/json-iterator/go/any_str.go b/vendor/github.com/json-iterator/go/any_str.go index a4b93c78c..1f12f6612 100644 --- a/vendor/github.com/json-iterator/go/any_str.go +++ b/vendor/github.com/json-iterator/go/any_str.go @@ -64,7 +64,6 @@ func (any *stringAny) ToInt64() int64 { flag := 1 startPos := 0 - endPos := 0 if any.val[0] == '+' || any.val[0] == '-' { startPos = 1 } @@ -73,6 +72,7 @@ func (any *stringAny) ToInt64() int64 { flag = -1 } + endPos := startPos for i := startPos; i < len(any.val); i++ { if any.val[i] >= '0' && any.val[i] <= '9' { endPos = i + 1 @@ -98,7 +98,6 @@ func (any *stringAny) ToUint64() uint64 { } startPos := 0 - endPos := 0 if any.val[0] == '-' { return 0 @@ -107,6 +106,7 @@ func (any *stringAny) ToUint64() uint64 { startPos = 1 } + endPos := startPos for i := startPos; i < len(any.val); i++ { if any.val[i] >= '0' && any.val[i] <= '9' { endPos = i + 1 diff --git a/vendor/github.com/json-iterator/go/config.go b/vendor/github.com/json-iterator/go/config.go index 8c58fcba5..2adcdc3b7 100644 --- a/vendor/github.com/json-iterator/go/config.go +++ b/vendor/github.com/json-iterator/go/config.go @@ -183,11 +183,11 @@ func (cfg *frozenConfig) validateJsonRawMessage(extension EncoderExtension) { encoder := &funcEncoder{func(ptr unsafe.Pointer, stream *Stream) { rawMessage := *(*json.RawMessage)(ptr) iter := cfg.BorrowIterator([]byte(rawMessage)) + defer cfg.ReturnIterator(iter) iter.Read() - if iter.Error != nil { + if iter.Error != nil && iter.Error != io.EOF { stream.WriteRaw("null") } else { - cfg.ReturnIterator(iter) stream.WriteRaw(string(rawMessage)) } }, func(ptr unsafe.Pointer) bool { diff --git a/vendor/github.com/json-iterator/go/iter_object.go b/vendor/github.com/json-iterator/go/iter_object.go index b65137114..58ee89c84 100644 --- a/vendor/github.com/json-iterator/go/iter_object.go +++ b/vendor/github.com/json-iterator/go/iter_object.go @@ -150,7 +150,7 @@ func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool { if c == '}' { return iter.decrementDepth() } - iter.ReportError("ReadObjectCB", `expect " after }, but found `+string([]byte{c})) + iter.ReportError("ReadObjectCB", `expect " after {, but found `+string([]byte{c})) iter.decrementDepth() return false } @@ -206,7 +206,7 @@ func (iter *Iterator) ReadMapCB(callback 
func(*Iterator, string) bool) bool { if c == '}' { return iter.decrementDepth() } - iter.ReportError("ReadMapCB", `expect " after }, but found `+string([]byte{c})) + iter.ReportError("ReadMapCB", `expect " after {, but found `+string([]byte{c})) iter.decrementDepth() return false } diff --git a/vendor/github.com/json-iterator/go/reflect_extension.go b/vendor/github.com/json-iterator/go/reflect_extension.go index 80320cd64..74a97bfe5 100644 --- a/vendor/github.com/json-iterator/go/reflect_extension.go +++ b/vendor/github.com/json-iterator/go/reflect_extension.go @@ -475,7 +475,7 @@ func calcFieldNames(originalFieldName string, tagProvidedFieldName string, whole fieldNames = []string{tagProvidedFieldName} } // private? - isNotExported := unicode.IsLower(rune(originalFieldName[0])) + isNotExported := unicode.IsLower(rune(originalFieldName[0])) || originalFieldName[0] == '_' if isNotExported { fieldNames = []string{} } diff --git a/vendor/github.com/json-iterator/go/reflect_map.go b/vendor/github.com/json-iterator/go/reflect_map.go index 9e2b623fe..582967130 100644 --- a/vendor/github.com/json-iterator/go/reflect_map.go +++ b/vendor/github.com/json-iterator/go/reflect_map.go @@ -49,6 +49,33 @@ func decoderOfMapKey(ctx *ctx, typ reflect2.Type) ValDecoder { return decoder } } + + ptrType := reflect2.PtrTo(typ) + if ptrType.Implements(unmarshalerType) { + return &referenceDecoder{ + &unmarshalerDecoder{ + valType: ptrType, + }, + } + } + if typ.Implements(unmarshalerType) { + return &unmarshalerDecoder{ + valType: typ, + } + } + if ptrType.Implements(textUnmarshalerType) { + return &referenceDecoder{ + &textUnmarshalerDecoder{ + valType: ptrType, + }, + } + } + if typ.Implements(textUnmarshalerType) { + return &textUnmarshalerDecoder{ + valType: typ, + } + } + switch typ.Kind() { case reflect.String: return decoderOfType(ctx, reflect2.DefaultTypeOfKind(reflect.String)) @@ -63,31 +90,6 @@ func decoderOfMapKey(ctx *ctx, typ reflect2.Type) ValDecoder { typ = reflect2.DefaultTypeOfKind(typ.Kind()) return &numericMapKeyDecoder{decoderOfType(ctx, typ)} default: - ptrType := reflect2.PtrTo(typ) - if ptrType.Implements(unmarshalerType) { - return &referenceDecoder{ - &unmarshalerDecoder{ - valType: ptrType, - }, - } - } - if typ.Implements(unmarshalerType) { - return &unmarshalerDecoder{ - valType: typ, - } - } - if ptrType.Implements(textUnmarshalerType) { - return &referenceDecoder{ - &textUnmarshalerDecoder{ - valType: ptrType, - }, - } - } - if typ.Implements(textUnmarshalerType) { - return &textUnmarshalerDecoder{ - valType: typ, - } - } return &lazyErrorDecoder{err: fmt.Errorf("unsupported map key type: %v", typ)} } } @@ -103,6 +105,19 @@ func encoderOfMapKey(ctx *ctx, typ reflect2.Type) ValEncoder { return encoder } } + + if typ == textMarshalerType { + return &directTextMarshalerEncoder{ + stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), + } + } + if typ.Implements(textMarshalerType) { + return &textMarshalerEncoder{ + valType: typ, + stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), + } + } + switch typ.Kind() { case reflect.String: return encoderOfType(ctx, reflect2.DefaultTypeOfKind(reflect.String)) @@ -117,17 +132,6 @@ func encoderOfMapKey(ctx *ctx, typ reflect2.Type) ValEncoder { typ = reflect2.DefaultTypeOfKind(typ.Kind()) return &numericMapKeyEncoder{encoderOfType(ctx, typ)} default: - if typ == textMarshalerType { - return &directTextMarshalerEncoder{ - stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), - } - } - if typ.Implements(textMarshalerType) { - return &textMarshalerEncoder{ - 
valType: typ, - stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), - } - } if typ.Kind() == reflect.Interface { return &dynamicMapKeyEncoder{ctx, typ} } @@ -163,10 +167,6 @@ func (decoder *mapDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { if c == '}' { return } - if c != '"' { - iter.ReportError("ReadMapCB", `expect " after }, but found `+string([]byte{c})) - return - } iter.unreadByte() key := decoder.keyType.UnsafeNew() decoder.keyDecoder.Decode(key, iter) diff --git a/vendor/github.com/json-iterator/go/reflect_optional.go b/vendor/github.com/json-iterator/go/reflect_optional.go index 43ec71d6d..fa71f4748 100644 --- a/vendor/github.com/json-iterator/go/reflect_optional.go +++ b/vendor/github.com/json-iterator/go/reflect_optional.go @@ -2,7 +2,6 @@ package jsoniter import ( "github.com/modern-go/reflect2" - "reflect" "unsafe" ) @@ -10,9 +9,6 @@ func decoderOfOptional(ctx *ctx, typ reflect2.Type) ValDecoder { ptrType := typ.(*reflect2.UnsafePtrType) elemType := ptrType.Elem() decoder := decoderOfType(ctx, elemType) - if ctx.prefix == "" && elemType.Kind() == reflect.Ptr { - return &dereferenceDecoder{elemType, decoder} - } return &OptionalDecoder{elemType, decoder} } diff --git a/vendor/github.com/json-iterator/go/reflect_struct_decoder.go b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go index 5ad5cc561..d7eb0eb5c 100644 --- a/vendor/github.com/json-iterator/go/reflect_struct_decoder.go +++ b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go @@ -507,7 +507,7 @@ func (decoder *generalStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) for c = ','; c == ','; c = iter.nextToken() { decoder.decodeOneField(ptr, iter) } - if iter.Error != nil && iter.Error != io.EOF { + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } if c != '}' { @@ -588,7 +588,7 @@ func (decoder *oneFieldStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) break } } - if iter.Error != nil && iter.Error != io.EOF { + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } iter.decrementDepth() @@ -622,7 +622,7 @@ func (decoder *twoFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator break } } - if iter.Error != nil && iter.Error != io.EOF { + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } iter.decrementDepth() @@ -660,7 +660,7 @@ func (decoder *threeFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat break } } - if iter.Error != nil && iter.Error != io.EOF { + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } iter.decrementDepth() @@ -702,7 +702,7 @@ func (decoder *fourFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato break } } - if iter.Error != nil && iter.Error != io.EOF { + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } iter.decrementDepth() @@ -748,7 +748,7 @@ func (decoder *fiveFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato break } } - if iter.Error != nil && iter.Error != io.EOF { + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { iter.Error = fmt.Errorf("%v.%s", decoder.typ, 
iter.Error.Error()) } iter.decrementDepth() @@ -798,7 +798,7 @@ func (decoder *sixFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator break } } - if iter.Error != nil && iter.Error != io.EOF { + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } iter.decrementDepth() @@ -852,7 +852,7 @@ func (decoder *sevenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat break } } - if iter.Error != nil && iter.Error != io.EOF { + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } iter.decrementDepth() @@ -910,7 +910,7 @@ func (decoder *eightFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat break } } - if iter.Error != nil && iter.Error != io.EOF { + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } iter.decrementDepth() @@ -972,7 +972,7 @@ func (decoder *nineFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato break } } - if iter.Error != nil && iter.Error != io.EOF { + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } iter.decrementDepth() @@ -1038,7 +1038,7 @@ func (decoder *tenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator break } } - if iter.Error != nil && iter.Error != io.EOF { + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } iter.decrementDepth() diff --git a/vendor/github.com/json-iterator/go/stream.go b/vendor/github.com/json-iterator/go/stream.go index 17662fded..23d8a3ad6 100644 --- a/vendor/github.com/json-iterator/go/stream.go +++ b/vendor/github.com/json-iterator/go/stream.go @@ -103,14 +103,14 @@ func (stream *Stream) Flush() error { if stream.Error != nil { return stream.Error } - n, err := stream.out.Write(stream.buf) + _, err := stream.out.Write(stream.buf) if err != nil { if stream.Error == nil { stream.Error = err } return err } - stream.buf = stream.buf[n:] + stream.buf = stream.buf[:0] return nil } @@ -177,7 +177,6 @@ func (stream *Stream) WriteEmptyObject() { func (stream *Stream) WriteMore() { stream.writeByte(',') stream.writeIndention(0) - stream.Flush() } // WriteArrayStart write [ with possible indention diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/README.md b/vendor/github.com/konsorten/go-windows-terminal-sequences/README.md index 195333e51..09a4a35c9 100644 --- a/vendor/github.com/konsorten/go-windows-terminal-sequences/README.md +++ b/vendor/github.com/konsorten/go-windows-terminal-sequences/README.md @@ -27,6 +27,7 @@ We thank all the authors who provided code to this library: * Felix Kollmann * Nicolas Perraut +* @dirty49374 ## License diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go b/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go index ef18d8f97..57f530ae8 100644 --- a/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go +++ b/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go @@ -4,7 +4,6 @@ package sequences import ( "syscall" - "unsafe" ) var ( @@ -27,7 +26,7 @@ func EnableVirtualTerminalProcessing(stream syscall.Handle, enable bool) error { mode &^= 
ENABLE_VIRTUAL_TERMINAL_PROCESSING } - ret, _, err := setConsoleMode.Call(uintptr(unsafe.Pointer(stream)), uintptr(mode)) + ret, _, err := setConsoleMode.Call(uintptr(stream), uintptr(mode)) if ret == 0 { return err } diff --git a/vendor/github.com/nxadm/tail/.gitignore b/vendor/github.com/nxadm/tail/.gitignore new file mode 100644 index 000000000..fa81aa93a --- /dev/null +++ b/vendor/github.com/nxadm/tail/.gitignore @@ -0,0 +1,2 @@ +.idea/ +.test/ \ No newline at end of file diff --git a/vendor/github.com/hpcloud/tail/.travis.yml b/vendor/github.com/nxadm/tail/.travis.yml similarity index 52% rename from vendor/github.com/hpcloud/tail/.travis.yml rename to vendor/github.com/nxadm/tail/.travis.yml index 9cf8bb7fc..95dd3bd78 100644 --- a/vendor/github.com/hpcloud/tail/.travis.yml +++ b/vendor/github.com/nxadm/tail/.travis.yml @@ -4,15 +4,13 @@ script: - go test -race -v ./... go: - - 1.4 - - 1.5 - - 1.6 + - "1.9" + - "1.10" + - "1.11" + - "1.12" + - "1.13" - tip matrix: allow_failures: - go: tip - -install: - - go get gopkg.in/fsnotify.v1 - - go get gopkg.in/tomb.v1 diff --git a/vendor/github.com/nxadm/tail/CHANGES.md b/vendor/github.com/nxadm/tail/CHANGES.md new file mode 100644 index 000000000..ef1b5fbed --- /dev/null +++ b/vendor/github.com/nxadm/tail/CHANGES.md @@ -0,0 +1,46 @@ +# Version v1.4.4 + +* Fix of checksum problem because of forced tag. No changes to the code. + +# Version v1.4.1 + +* Incorporated PR 162 by by Mohammed902: "Simplify non-Windows build tag". + +# Version v1.4.0 + +* Incorporated PR 9 by mschneider82: "Added seekinfo to Tail". + +# Version v1.3.1 + +* Incorporated PR 7: "Fix deadlock when stopping on non-empty file/buffer", +fixes upstream issue 93. + + +# Version v1.3.0 + +* Incorporated changes of unmerged upstream PR 149 by mezzi: "added line num +to Line struct". + +# Version v1.2.1 + +* Incorporated changes of unmerged upstream PR 128 by jadekler: "Compile-able +code in readme". +* Incorporated changes of unmerged upstream PR 130 by fgeller: "small change +to comment wording". +* Incorporated changes of unmerged upstream PR 133 by sm3142: "removed +spurious newlines from log messages". + +# Version v1.2.0 + +* Incorporated changes of unmerged upstream PR 126 by Code-Hex: "Solved the + problem for never return the last line if it's not followed by a newline". +* Incorporated changes of unmerged upstream PR 131 by StoicPerlman: "Remove +deprecated os.SEEK consts". The changes bumped the minimal supported Go +release to 1.9. + +# Version v1.1.0 + +* migration to go modules. +* release of master branch of the dormant upstream, because it contains +fixes and improvement no present in the tagged release. + diff --git a/vendor/github.com/nxadm/tail/Dockerfile b/vendor/github.com/nxadm/tail/Dockerfile new file mode 100644 index 000000000..d9633891c --- /dev/null +++ b/vendor/github.com/nxadm/tail/Dockerfile @@ -0,0 +1,19 @@ +FROM golang + +RUN mkdir -p $GOPATH/src/github.com/nxadm/tail/ +ADD . $GOPATH/src/github.com/nxadm/tail/ + +# expecting to fetch dependencies successfully. +RUN go get -v github.com/nxadm/tail + +# expecting to run the test successfully. 
+RUN go test -v github.com/nxadm/tail + +# expecting to install successfully +RUN go install -v github.com/nxadm/tail +RUN go install -v github.com/nxadm/tail/cmd/gotail + +RUN $GOPATH/bin/gotail -h || true + +ENV PATH $GOPATH/bin:$PATH +CMD ["gotail"] diff --git a/vendor/github.com/hpcloud/tail/LICENSE.txt b/vendor/github.com/nxadm/tail/LICENSE similarity index 100% rename from vendor/github.com/hpcloud/tail/LICENSE.txt rename to vendor/github.com/nxadm/tail/LICENSE diff --git a/vendor/github.com/nxadm/tail/README.md b/vendor/github.com/nxadm/tail/README.md new file mode 100644 index 000000000..dbb6c1727 --- /dev/null +++ b/vendor/github.com/nxadm/tail/README.md @@ -0,0 +1,36 @@ +[![Build Status](https://travis-ci.org/nxadm/tail.svg?branch=master)](https://travis-ci.org/nxadm/tail) + +This is repo is forked from the dormant upstream repo at +[hpcloud](https://github.com/hpcloud/tail). This fork adds support for go +modules, updates the dependencies, adds features and fixes bugs. Go 1.9 is +the oldest compiler release supported. + +# Go package for tail-ing files + +A Go package striving to emulate the features of the BSD `tail` program. + +```Go +t, err := tail.TailFile("/var/log/nginx.log", tail.Config{Follow: true}) +if err != nil { + panic(err) +} + +for line := range t.Lines { + fmt.Println(line.Text) +} +``` + +See [API documentation](http://godoc.org/github.com/nxadm/tail). + +## Log rotation + +Tail comes with full support for truncation/move detection as it is +designed to work with log rotation tools. + +## Installing + + go get github.com/nxadm/tail/... + +## Windows support + +This package [needs assistance](https://github.com/nxadm/tail/labels/Windows) for full Windows support. diff --git a/vendor/github.com/hpcloud/tail/appveyor.yml b/vendor/github.com/nxadm/tail/appveyor.yml similarity index 77% rename from vendor/github.com/hpcloud/tail/appveyor.yml rename to vendor/github.com/nxadm/tail/appveyor.yml index d370055b6..e149bc62d 100644 --- a/vendor/github.com/hpcloud/tail/appveyor.yml +++ b/vendor/github.com/nxadm/tail/appveyor.yml @@ -5,7 +5,7 @@ build_script: - SET GOPATH=c:\workspace - go test -v -race ./... 
test: off -clone_folder: c:\workspace\src\github.com\hpcloud\tail +clone_folder: c:\workspace\src\github.com\nxadm\tail branches: only: - master diff --git a/vendor/github.com/nxadm/tail/go.mod b/vendor/github.com/nxadm/tail/go.mod new file mode 100644 index 000000000..fb10d42af --- /dev/null +++ b/vendor/github.com/nxadm/tail/go.mod @@ -0,0 +1,9 @@ +module github.com/nxadm/tail + +go 1.13 + +require ( + github.com/fsnotify/fsnotify v1.4.7 + golang.org/x/sys v0.0.0-20190904154756-749cb33beabd // indirect + gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 +) diff --git a/vendor/github.com/nxadm/tail/go.sum b/vendor/github.com/nxadm/tail/go.sum new file mode 100644 index 000000000..b391f1904 --- /dev/null +++ b/vendor/github.com/nxadm/tail/go.sum @@ -0,0 +1,6 @@ +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd h1:DBH9mDw0zluJT/R+nGuV3jWFWLFaHyYZWD4tOT+cjn0= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= diff --git a/vendor/github.com/hpcloud/tail/ratelimiter/Licence b/vendor/github.com/nxadm/tail/ratelimiter/Licence similarity index 100% rename from vendor/github.com/hpcloud/tail/ratelimiter/Licence rename to vendor/github.com/nxadm/tail/ratelimiter/Licence diff --git a/vendor/github.com/hpcloud/tail/ratelimiter/leakybucket.go b/vendor/github.com/nxadm/tail/ratelimiter/leakybucket.go similarity index 100% rename from vendor/github.com/hpcloud/tail/ratelimiter/leakybucket.go rename to vendor/github.com/nxadm/tail/ratelimiter/leakybucket.go diff --git a/vendor/github.com/hpcloud/tail/ratelimiter/memory.go b/vendor/github.com/nxadm/tail/ratelimiter/memory.go similarity index 81% rename from vendor/github.com/hpcloud/tail/ratelimiter/memory.go rename to vendor/github.com/nxadm/tail/ratelimiter/memory.go index 8f6a5784a..bf3c2131b 100644 --- a/vendor/github.com/hpcloud/tail/ratelimiter/memory.go +++ b/vendor/github.com/nxadm/tail/ratelimiter/memory.go @@ -5,7 +5,10 @@ import ( "time" ) -const GC_SIZE int = 100 +const ( + GC_SIZE int = 100 + GC_PERIOD time.Duration = 60 * time.Second +) type Memory struct { store map[string]LeakyBucket @@ -44,11 +47,10 @@ func (m *Memory) GarbageCollect() { now := time.Now() // rate limit GC to once per minute - if now.Add(60*time.Second).Unix() > m.lastGCCollected.Unix() { - + if now.Unix() >= m.lastGCCollected.Add(GC_PERIOD).Unix() { for key, bucket := range m.store { // if the bucket is drained, then GC - if bucket.DrainedAt().Unix() > now.Unix() { + if bucket.DrainedAt().Unix() < now.Unix() { delete(m.store, key) } } diff --git a/vendor/github.com/hpcloud/tail/ratelimiter/storage.go b/vendor/github.com/nxadm/tail/ratelimiter/storage.go similarity index 100% rename from vendor/github.com/hpcloud/tail/ratelimiter/storage.go rename to vendor/github.com/nxadm/tail/ratelimiter/storage.go diff --git a/vendor/github.com/hpcloud/tail/tail.go b/vendor/github.com/nxadm/tail/tail.go similarity index 85% rename from vendor/github.com/hpcloud/tail/tail.go rename to vendor/github.com/nxadm/tail/tail.go index 2d252d605..58d3c4b95 100644 --- a/vendor/github.com/hpcloud/tail/tail.go +++ b/vendor/github.com/nxadm/tail/tail.go @@ 
-15,31 +15,33 @@ import ( "sync" "time" - "github.com/hpcloud/tail/ratelimiter" - "github.com/hpcloud/tail/util" - "github.com/hpcloud/tail/watch" + "github.com/nxadm/tail/ratelimiter" + "github.com/nxadm/tail/util" + "github.com/nxadm/tail/watch" "gopkg.in/tomb.v1" ) var ( - ErrStop = fmt.Errorf("tail should now stop") + ErrStop = errors.New("tail should now stop") ) type Line struct { - Text string - Time time.Time - Err error // Error from tail + Text string + Num int + SeekInfo SeekInfo + Time time.Time + Err error // Error from tail } // NewLine returns a Line with present time. -func NewLine(text string) *Line { - return &Line{text, time.Now(), nil} +func NewLine(text string, lineNum int) *Line { + return &Line{text, lineNum, SeekInfo{}, time.Now(), nil} } -// SeekInfo represents arguments to `os.Seek` +// SeekInfo represents arguments to `io.Seek` type SeekInfo struct { Offset int64 - Whence int // os.SEEK_* + Whence int // io.Seek* } type logger interface { @@ -78,8 +80,9 @@ type Tail struct { Lines chan *Line Config - file *os.File - reader *bufio.Reader + file *os.File + reader *bufio.Reader + lineNum int watcher watch.FileWatcher changes *watch.FileChanges @@ -113,7 +116,7 @@ func TailFile(filename string, config Config) (*Tail, error) { // when Logger was not specified in config, use default logger if t.Logger == nil { - t.Logger = log.New(os.Stderr, "", log.LstdFlags) + t.Logger = DefaultLogger } if t.Poll { @@ -135,15 +138,15 @@ func TailFile(filename string, config Config) (*Tail, error) { return t, nil } -// Return the file's current position, like stdio's ftell(). +// Tell returns the file's current position, like stdio's ftell(). // But this value is not very accurate. -// it may readed one line in the chan(tail.Lines), -// so it may lost one line. +// One line from the chan(tail.Lines) may have been read, +// so it may have lost one line. func (tail *Tail) Tell() (offset int64, err error) { if tail.file == nil { return } - offset, err = tail.file.Seek(0, os.SEEK_CUR) + offset, err = tail.file.Seek(0, io.SeekCurrent) if err != nil { return } @@ -186,6 +189,7 @@ func (tail *Tail) closeFile() { func (tail *Tail) reopen() error { tail.closeFile() + tail.lineNum = 0 for { var err error tail.file, err = OpenFile(tail.Filename) @@ -241,7 +245,6 @@ func (tail *Tail) tailFileSync() { // Seek to requested location on first open of the file. if tail.Location != nil { _, err := tail.file.Seek(tail.Location.Offset, tail.Location.Whence) - tail.Logger.Printf("Seeked %s - %+v\n", tail.Filename, tail.Location) if err != nil { tail.Killf("Seek error on %s: %s", tail.Filename, err) return @@ -250,16 +253,12 @@ func (tail *Tail) tailFileSync() { tail.openReader() - var offset int64 = 0 - var err error - // Read line by line. for { // do not seek in named pipes if !tail.Pipe { // grab the position in case we need to back up in the event of a half-line - offset, err = tail.Tell() - if err != nil { + if _, err := tail.Tell(); err != nil { tail.Kill(err) return } @@ -273,10 +272,9 @@ func (tail *Tail) tailFileSync() { if cooloff { // Wait a second before seeking till the end of // file when rate limit is reached. 
- msg := fmt.Sprintf( - "Too much log activity; waiting a second " + - "before resuming tailing") - tail.Lines <- &Line{msg, time.Now(), fmt.Errorf(msg)} + msg := ("Too much log activity; waiting a second before resuming tailing") + offset, _ := tail.Tell() + tail.Lines <- &Line{msg, tail.lineNum, SeekInfo{Offset: offset}, time.Now(), errors.New(msg)} select { case <-time.After(time.Second): case <-tail.Dying(): @@ -296,10 +294,8 @@ func (tail *Tail) tailFileSync() { } if tail.Follow && line != "" { - // this has the potential to never return the last line if - // it's not followed by a newline; seems a fair trade here - err := tail.seekTo(SeekInfo{Offset: offset, Whence: 0}) - if err != nil { + tail.sendLine(line) + if err := tail.seekEnd(); err != nil { tail.Kill(err) return } @@ -337,7 +333,7 @@ func (tail *Tail) tailFileSync() { // reopened if ReOpen is true. Truncated files are always reopened. func (tail *Tail) waitForChanges() error { if tail.changes == nil { - pos, err := tail.file.Seek(0, os.SEEK_CUR) + pos, err := tail.file.Seek(0, io.SeekCurrent) if err != nil { return err } @@ -361,10 +357,9 @@ func (tail *Tail) waitForChanges() error { tail.Logger.Printf("Successfully reopened %s", tail.Filename) tail.openReader() return nil - } else { - tail.Logger.Printf("Stopping tail as file no longer exists: %s", tail.Filename) - return ErrStop } + tail.Logger.Printf("Stopping tail as file no longer exists: %s", tail.Filename) + return ErrStop case <-tail.changes.Truncated: // Always reopen truncated files (Follow is true) tail.Logger.Printf("Re-opening truncated file %s ...", tail.Filename) @@ -377,20 +372,21 @@ func (tail *Tail) waitForChanges() error { case <-tail.Dying(): return ErrStop } - panic("unreachable") } func (tail *Tail) openReader() { + tail.lk.Lock() if tail.MaxLineSize > 0 { // add 2 to account for newline characters tail.reader = bufio.NewReaderSize(tail.file, tail.MaxLineSize+2) } else { tail.reader = bufio.NewReader(tail.file) } + tail.lk.Unlock() } func (tail *Tail) seekEnd() error { - return tail.seekTo(SeekInfo{Offset: 0, Whence: os.SEEK_END}) + return tail.seekTo(SeekInfo{Offset: 0, Whence: io.SeekEnd}) } func (tail *Tail) seekTo(pos SeekInfo) error { @@ -415,13 +411,19 @@ func (tail *Tail) sendLine(line string) bool { } for _, line := range lines { - tail.Lines <- &Line{line, now, nil} + tail.lineNum++ + offset, _ := tail.Tell() + select { + case tail.Lines <- &Line{line, tail.lineNum, SeekInfo{Offset: offset}, now, nil}: + case <-tail.Dying(): + return true + } } if tail.Config.RateLimiter != nil { ok := tail.Config.RateLimiter.Pour(uint16(len(lines))) if !ok { - tail.Logger.Printf("Leaky bucket full (%v); entering 1s cooloff period.\n", + tail.Logger.Printf("Leaky bucket full (%v); entering 1s cooloff period.", tail.Filename) return false } diff --git a/vendor/github.com/hpcloud/tail/tail_posix.go b/vendor/github.com/nxadm/tail/tail_posix.go similarity index 71% rename from vendor/github.com/hpcloud/tail/tail_posix.go rename to vendor/github.com/nxadm/tail/tail_posix.go index bc4dc3357..1b94520ec 100644 --- a/vendor/github.com/hpcloud/tail/tail_posix.go +++ b/vendor/github.com/nxadm/tail/tail_posix.go @@ -1,4 +1,4 @@ -// +build linux darwin freebsd netbsd openbsd +// +build !windows package tail diff --git a/vendor/github.com/hpcloud/tail/tail_windows.go b/vendor/github.com/nxadm/tail/tail_windows.go similarity index 81% rename from vendor/github.com/hpcloud/tail/tail_windows.go rename to vendor/github.com/nxadm/tail/tail_windows.go index ef2cfca1b..4aaceea28 
100644 --- a/vendor/github.com/hpcloud/tail/tail_windows.go +++ b/vendor/github.com/nxadm/tail/tail_windows.go @@ -3,7 +3,7 @@ package tail import ( - "github.com/hpcloud/tail/winfile" + "github.com/nxadm/tail/winfile" "os" ) diff --git a/vendor/github.com/hpcloud/tail/util/util.go b/vendor/github.com/nxadm/tail/util/util.go similarity index 94% rename from vendor/github.com/hpcloud/tail/util/util.go rename to vendor/github.com/nxadm/tail/util/util.go index 54151fe39..2ba0ed71c 100644 --- a/vendor/github.com/hpcloud/tail/util/util.go +++ b/vendor/github.com/nxadm/tail/util/util.go @@ -18,7 +18,7 @@ var LOGGER = &Logger{log.New(os.Stderr, "", log.LstdFlags)} // fatal is like panic except it displays only the current goroutine's stack. func Fatal(format string, v ...interface{}) { - // https://github.com/hpcloud/log/blob/master/log.go#L45 + // https://github.com/nxadm/log/blob/master/log.go#L45 LOGGER.Output(2, fmt.Sprintf("FATAL -- "+format, v...)+"\n"+string(debug.Stack())) os.Exit(1) } diff --git a/vendor/github.com/hpcloud/tail/watch/filechanges.go b/vendor/github.com/nxadm/tail/watch/filechanges.go similarity index 93% rename from vendor/github.com/hpcloud/tail/watch/filechanges.go rename to vendor/github.com/nxadm/tail/watch/filechanges.go index 3ce5dcecb..f80aead9a 100644 --- a/vendor/github.com/hpcloud/tail/watch/filechanges.go +++ b/vendor/github.com/nxadm/tail/watch/filechanges.go @@ -8,7 +8,7 @@ type FileChanges struct { func NewFileChanges() *FileChanges { return &FileChanges{ - make(chan bool), make(chan bool), make(chan bool)} + make(chan bool, 1), make(chan bool, 1), make(chan bool, 1)} } func (fc *FileChanges) NotifyModified() { diff --git a/vendor/github.com/hpcloud/tail/watch/inotify.go b/vendor/github.com/nxadm/tail/watch/inotify.go similarity index 88% rename from vendor/github.com/hpcloud/tail/watch/inotify.go rename to vendor/github.com/nxadm/tail/watch/inotify.go index 4478f1e1a..439921810 100644 --- a/vendor/github.com/hpcloud/tail/watch/inotify.go +++ b/vendor/github.com/nxadm/tail/watch/inotify.go @@ -8,9 +8,9 @@ import ( "os" "path/filepath" - "github.com/hpcloud/tail/util" + "github.com/nxadm/tail/util" - "gopkg.in/fsnotify.v1" + "github.com/fsnotify/fsnotify" "gopkg.in/tomb.v1" ) @@ -75,7 +75,6 @@ func (fw *InotifyFileWatcher) ChangeEvents(t *tomb.Tomb, pos int64) (*FileChange fw.Size = pos go func() { - defer RemoveWatch(fw.Filename) events := Events(fw.Filename) @@ -88,9 +87,11 @@ func (fw *InotifyFileWatcher) ChangeEvents(t *tomb.Tomb, pos int64) (*FileChange select { case evt, ok = <-events: if !ok { + RemoveWatch(fw.Filename) return } case <-t.Dying(): + RemoveWatch(fw.Filename) return } @@ -99,13 +100,19 @@ func (fw *InotifyFileWatcher) ChangeEvents(t *tomb.Tomb, pos int64) (*FileChange fallthrough case evt.Op&fsnotify.Rename == fsnotify.Rename: + RemoveWatch(fw.Filename) changes.NotifyDeleted() return + //With an open fd, unlink(fd) - inotify returns IN_ATTRIB (==fsnotify.Chmod) + case evt.Op&fsnotify.Chmod == fsnotify.Chmod: + fallthrough + case evt.Op&fsnotify.Write == fsnotify.Write: fi, err := os.Stat(fw.Filename) if err != nil { if os.IsNotExist(err) { + RemoveWatch(fw.Filename) changes.NotifyDeleted() return } diff --git a/vendor/github.com/hpcloud/tail/watch/inotify_tracker.go b/vendor/github.com/nxadm/tail/watch/inotify_tracker.go similarity index 86% rename from vendor/github.com/hpcloud/tail/watch/inotify_tracker.go rename to vendor/github.com/nxadm/tail/watch/inotify_tracker.go index 03be4275c..a94bcd4cb 100644 --- 
a/vendor/github.com/hpcloud/tail/watch/inotify_tracker.go +++ b/vendor/github.com/nxadm/tail/watch/inotify_tracker.go @@ -10,9 +10,9 @@ import ( "sync" "syscall" - "github.com/hpcloud/tail/util" + "github.com/nxadm/tail/util" - "gopkg.in/fsnotify.v1" + "github.com/fsnotify/fsnotify" ) type InotifyTracker struct { @@ -83,21 +83,21 @@ func watch(winfo *watchInfo) error { } // RemoveWatch signals the run goroutine to remove the watch for the input filename -func RemoveWatch(fname string) { - remove(&watchInfo{ +func RemoveWatch(fname string) error { + return remove(&watchInfo{ fname: fname, }) } // RemoveWatch create signals the run goroutine to remove the watch for the input filename -func RemoveWatchCreate(fname string) { - remove(&watchInfo{ +func RemoveWatchCreate(fname string) error { + return remove(&watchInfo{ op: fsnotify.Create, fname: fname, }) } -func remove(winfo *watchInfo) { +func remove(winfo *watchInfo) error { // start running the shared InotifyTracker if not already running once.Do(goRun) @@ -108,27 +108,10 @@ func remove(winfo *watchInfo) { delete(shared.done, winfo.fname) close(done) } - - fname := winfo.fname - if winfo.isCreate() { - // Watch for new files to be created in the parent directory. - fname = filepath.Dir(fname) - } - shared.watchNums[fname]-- - watchNum := shared.watchNums[fname] - if watchNum == 0 { - delete(shared.watchNums, fname) - } shared.mux.Unlock() - // If we were the last ones to watch this file, unsubscribe from inotify. - // This needs to happen after releasing the lock because fsnotify waits - // synchronously for the kernel to acknowledge the removal of the watch - // for this file, which causes us to deadlock if we still held the lock. - if watchNum == 0 { - shared.watcher.Remove(fname) - } shared.remove <- winfo + return <-shared.error } // Events returns a channel to which FileEvents corresponding to the input filename @@ -142,8 +125,8 @@ func Events(fname string) <-chan fsnotify.Event { } // Cleanup removes the watch for the input filename if necessary. -func Cleanup(fname string) { - RemoveWatch(fname) +func Cleanup(fname string) error { + return RemoveWatch(fname) } // watchFlags calls fsnotify.WatchFlags for the input filename and flags, creating @@ -154,6 +137,8 @@ func (shared *InotifyTracker) addWatch(winfo *watchInfo) error { if shared.chans[winfo.fname] == nil { shared.chans[winfo.fname] = make(chan fsnotify.Event) + } + if shared.done[winfo.fname] == nil { shared.done[winfo.fname] = make(chan bool) } @@ -163,47 +148,50 @@ func (shared *InotifyTracker) addWatch(winfo *watchInfo) error { fname = filepath.Dir(fname) } + var err error // already in inotify watch - if shared.watchNums[fname] > 0 { - shared.watchNums[fname]++ - if winfo.isCreate() { - shared.watchNums[winfo.fname]++ - } - return nil + if shared.watchNums[fname] == 0 { + err = shared.watcher.Add(fname) } - - err := shared.watcher.Add(fname) if err == nil { shared.watchNums[fname]++ - if winfo.isCreate() { - shared.watchNums[winfo.fname]++ - } } return err } // removeWatch calls fsnotify.RemoveWatch for the input filename and closes the // corresponding events channel. 
-func (shared *InotifyTracker) removeWatch(winfo *watchInfo) { +func (shared *InotifyTracker) removeWatch(winfo *watchInfo) error { shared.mux.Lock() - defer shared.mux.Unlock() ch := shared.chans[winfo.fname] - if ch == nil { - return + if ch != nil { + delete(shared.chans, winfo.fname) + close(ch) } - delete(shared.chans, winfo.fname) - close(ch) - - if !winfo.isCreate() { - return + fname := winfo.fname + if winfo.isCreate() { + // Watch for new files to be created in the parent directory. + fname = filepath.Dir(fname) } + shared.watchNums[fname]-- + watchNum := shared.watchNums[fname] + if watchNum == 0 { + delete(shared.watchNums, fname) + } + shared.mux.Unlock() - shared.watchNums[winfo.fname]-- - if shared.watchNums[winfo.fname] == 0 { - delete(shared.watchNums, winfo.fname) + var err error + // If we were the last ones to watch this file, unsubscribe from inotify. + // This needs to happen after releasing the lock because fsnotify waits + // synchronously for the kernel to acknowledge the removal of the watch + // for this file, which causes us to deadlock if we still held the lock. + if watchNum == 0 { + err = shared.watcher.Remove(fname) } + + return err } // sendEvent sends the input event to the appropriate Tail. @@ -238,7 +226,7 @@ func (shared *InotifyTracker) run() { shared.error <- shared.addWatch(winfo) case winfo := <-shared.remove: - shared.removeWatch(winfo) + shared.error <- shared.removeWatch(winfo) case event, open := <-shared.watcher.Events: if !open { diff --git a/vendor/github.com/hpcloud/tail/watch/polling.go b/vendor/github.com/nxadm/tail/watch/polling.go similarity index 98% rename from vendor/github.com/hpcloud/tail/watch/polling.go rename to vendor/github.com/nxadm/tail/watch/polling.go index 49491f21d..fb1706908 100644 --- a/vendor/github.com/hpcloud/tail/watch/polling.go +++ b/vendor/github.com/nxadm/tail/watch/polling.go @@ -8,7 +8,7 @@ import ( "runtime" "time" - "github.com/hpcloud/tail/util" + "github.com/nxadm/tail/util" "gopkg.in/tomb.v1" ) diff --git a/vendor/github.com/hpcloud/tail/watch/watch.go b/vendor/github.com/nxadm/tail/watch/watch.go similarity index 100% rename from vendor/github.com/hpcloud/tail/watch/watch.go rename to vendor/github.com/nxadm/tail/watch/watch.go diff --git a/vendor/github.com/hpcloud/tail/winfile/winfile.go b/vendor/github.com/nxadm/tail/winfile/winfile.go similarity index 100% rename from vendor/github.com/hpcloud/tail/winfile/winfile.go rename to vendor/github.com/nxadm/tail/winfile/winfile.go diff --git a/vendor/github.com/onsi/ginkgo/.travis.yml b/vendor/github.com/onsi/ginkgo/.travis.yml index 72e8ccf0b..079af2431 100644 --- a/vendor/github.com/onsi/ginkgo/.travis.yml +++ b/vendor/github.com/onsi/ginkgo/.travis.yml @@ -1,15 +1,25 @@ language: go go: - - 1.10.x - - 1.11.x - - 1.12.x + - 1.13.x + - 1.14.x - tip +cache: + directories: + - $GOPATH/pkg/mod + +# allow internal package imports, necessary for forked repositories +go_import_path: github.com/onsi/ginkgo + install: - - go get -v -t ./... - - go get golang.org/x/tools/cmd/cover - - go get github.com/onsi/gomega - - go install github.com/onsi/ginkgo/ginkgo + - GO111MODULE="off" go get -v -t ./... 
+ - GO111MODULE="off" go get golang.org/x/tools/cmd/cover + - GO111MODULE="off" go get github.com/onsi/gomega + - GO111MODULE="off" go install github.com/onsi/ginkgo/ginkgo - export PATH=$PATH:$HOME/gopath/bin -script: $HOME/gopath/bin/ginkgo -r --randomizeAllSpecs --randomizeSuites --race --trace && go vet +script: + - GO111MODULE="on" go mod tidy + - diff -u <(echo -n) <(git diff go.mod) + - diff -u <(echo -n) <(git diff go.sum) + - $HOME/gopath/bin/ginkgo -r --randomizeAllSpecs --randomizeSuites --race --trace && go vet diff --git a/vendor/github.com/onsi/ginkgo/CHANGELOG.md b/vendor/github.com/onsi/ginkgo/CHANGELOG.md index aeadb66e0..6092fcb63 100644 --- a/vendor/github.com/onsi/ginkgo/CHANGELOG.md +++ b/vendor/github.com/onsi/ginkgo/CHANGELOG.md @@ -1,20 +1,102 @@ +## 1.14.1 + +### Fixes +- Discard exported method declaration when running ginkgo bootstrap (#558) [f4b0240] + +## 1.14.0 + +### Features +- Defer running top-level container nodes until RunSpecs is called [d44dedf] +- [Document Ginkgo lifecycle](http://onsi.github.io/ginkgo/#understanding-ginkgos-lifecycle) +- Add `extensions/globals` package (#692) [3295c8f] - this can be helpful in contexts where you are test-driving your test-generation code (see [#692](https://github.com/onsi/ginkgo/pull/692)) +- Print Skip reason in JUnit reporter if one was provided [820dfab] + +## 1.13.0 + +### Features +- Add a version of table.Entry that allows dumping the entry parameters. (#689) [21eaef2] + +### Fixes +- Ensure integration tests pass in an environment sans GOPATH [606fba2] +- Add books package (#568) [fc0e44e] +- doc(readme): installation via "tools package" (#677) [83bb20e] +- Solve the undefined: unix.Dup2 compile error on mips64le (#680) [0624f75] +- Import package without dot (#687) [6321024] +- Fix integration tests to stop require GOPATH (#686) [a912ec5] + +## 1.12.3 + +### Fixes +- Print correct code location of failing table test (#666) [c6d7afb] + +## 1.12.2 + +### Fixes +- Update dependencies [ea4a036] + +## 1.12.1 + +### Fixes +- Make unfocus ("blur") much faster (#674) [8b18061] +- Fix typo (#673) [7fdcbe8] +- Test against 1.14 and remove 1.12 [d5c2ad6] +- Test if a coverprofile content is empty before checking its latest character (#670) [14d9fa2] +- replace tail package with maintained one. 
this fixes go get errors (#667) [4ba33d4] +- improve ginkgo performance - makes progress on #644 [a14f98e] +- fix convert integration tests [1f8ba69] +- fix typo succesful -> successful (#663) [1ea49cf] +- Fix invalid link (#658) [b886136] +- convert utility : Include comments from source (#657) [1077c6d] +- Explain what BDD means [d79e7fb] +- skip race detector test on unsupported platform (#642) [f8ab89d] +- Use Dup2 from golang.org/x/sys/unix instead of syscallDup (#638) [5d53c55] +- Fix missing newline in combined coverage file (#641) [6a07ea2] +- check if a spec is run before returning SpecSummary (#645) [8850000] + +## 1.12.0 + +### Features +- Add module definition (#630) [78916ab] + +## 1.11.0 + +### Features +- Add syscall for riscv64 architecture [f66e896] +- teamcity reporter: output location of test failure as well as test definition (#626) [9869142] +- teamcity reporter: output newline after every service message (#625) [3cfa02d] +- Add support for go module when running `generate` command (#578) [9c89e3f] + +## 1.10.3 + +### Fixes +- Set go_import_path in travis.yml to allow internal packages in forks (#607) [3b721db] +- Add integration test [d90e0dc] +- Fix coverage files combining [e5dde8c] +- A new CLI option: -ginkgo.reportFile (#601) [034fd25] + +## 1.10.2 + +### Fixes +- speed up table entry generateIt() (#609) [5049dc5] +- Fix. Write errors to stderr instead of stdout (#610) [7bb3091] + ## 1.10.1 -## Fixes +### Fixes - stack backtrace: fix skipping (#600) [2a4c0bd] ## 1.10.0 -## Fixes +### Fixes - stack backtrace: fix alignment and skipping [66915d6] - fix typo in documentation [8f97b93] ## 1.9.0 -## Features +### Features - Option to print output into report, when tests have passed [0545415] -## Fixes +### Fixes - Fixed typos in comments [0ecbc58] - gofmt code [a7f8bfb] - Simplify code [7454d00] @@ -181,7 +263,7 @@ New Features: - `ginkgo -tags=TAG_LIST` passes a list of tags down to the `go build` command. - `ginkgo --failFast` aborts the test suite after the first failure. - `ginkgo generate file_1 file_2` can take multiple file arguments. -- Ginkgo now summarizes any spec failures that occured at the end of the test run. +- Ginkgo now summarizes any spec failures that occurred at the end of the test run. - `ginkgo --randomizeSuites` will run tests *suites* in random order using the generated/passed-in seed. Improvements: diff --git a/vendor/github.com/onsi/ginkgo/README.md b/vendor/github.com/onsi/ginkgo/README.md index cdf8d054a..475e04994 100644 --- a/vendor/github.com/onsi/ginkgo/README.md +++ b/vendor/github.com/onsi/ginkgo/README.md @@ -1,29 +1,45 @@ -![Ginkgo: A Go BDD Testing Framework](http://onsi.github.io/ginkgo/images/ginkgo.png) +![Ginkgo: A Go BDD Testing Framework](https://onsi.github.io/ginkgo/images/ginkgo.png) [![Build Status](https://travis-ci.org/onsi/ginkgo.svg?branch=master)](https://travis-ci.org/onsi/ginkgo) -Jump to the [docs](http://onsi.github.io/ginkgo/) to learn more. To start rolling your Ginkgo tests *now* [keep reading](#set-me-up)! +Jump to the [docs](https://onsi.github.io/ginkgo/) to learn more. To start rolling your Ginkgo tests *now* [keep reading](#set-me-up)! -If you have a question, comment, bug report, feature request, etc. please open a GitHub issue. +If you have a question, comment, bug report, feature request, etc. please open a GitHub issue, or visit the [Ginkgo Slack channel](https://app.slack.com/client/T029RQSE6/CQQ50BBNW). 
+ +## TLDR +Ginkgo builds on Go's `testing` package, allowing expressive [Behavior-Driven Development](https://en.wikipedia.org/wiki/Behavior-driven_development) ("BDD") style tests. +It is typically (and optionally) paired with the [Gomega](https://github.com/onsi/gomega) matcher library. + +```go +Describe("the strings package", func() { + Context("strings.Contains()", func() { + When("the string contains the substring in the middle", func() { + It("returns `true`", func() { + Expect(strings.Contains("Ginkgo is awesome", "is")).To(BeTrue()) + }) + }) + }) +}) +``` ## Feature List -- Ginkgo uses Go's `testing` package and can live alongside your existing `testing` tests. It's easy to [bootstrap](http://onsi.github.io/ginkgo/#bootstrapping-a-suite) and start writing your [first tests](http://onsi.github.io/ginkgo/#adding-specs-to-a-suite) +- Ginkgo uses Go's `testing` package and can live alongside your existing `testing` tests. It's easy to [bootstrap](https://onsi.github.io/ginkgo/#bootstrapping-a-suite) and start writing your [first tests](https://onsi.github.io/ginkgo/#adding-specs-to-a-suite) -- Structure your BDD-style tests expressively: - - Nestable [`Describe`, `Context` and `When` container blocks](http://onsi.github.io/ginkgo/#organizing-specs-with-containers-describe-and-context) - - [`BeforeEach` and `AfterEach` blocks](http://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach) for setup and teardown - - [`It` and `Specify` blocks](http://onsi.github.io/ginkgo/#individual-specs-) that hold your assertions - - [`JustBeforeEach` blocks](http://onsi.github.io/ginkgo/#separating-creation-and-configuration-justbeforeeach) that separate creation from configuration (also known as the subject action pattern). - - [`BeforeSuite` and `AfterSuite` blocks](http://onsi.github.io/ginkgo/#global-setup-and-teardown-beforesuite-and-aftersuite) to prep for and cleanup after a suite. +- Ginkgo allows you to write tests in Go using expressive [Behavior-Driven Development](https://en.wikipedia.org/wiki/Behavior-driven_development) ("BDD") style: + - Nestable [`Describe`, `Context` and `When` container blocks](https://onsi.github.io/ginkgo/#organizing-specs-with-containers-describe-and-context) + - [`BeforeEach` and `AfterEach` blocks](https://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach) for setup and teardown + - [`It` and `Specify` blocks](https://onsi.github.io/ginkgo/#individual-specs-it) that hold your assertions + - [`JustBeforeEach` blocks](https://onsi.github.io/ginkgo/#separating-creation-and-configuration-justbeforeeach) that separate creation from configuration (also known as the subject action pattern). + - [`BeforeSuite` and `AfterSuite` blocks](https://onsi.github.io/ginkgo/#global-setup-and-teardown-beforesuite-and-aftersuite) to prep for and cleanup after a suite. - A comprehensive test runner that lets you: - - Mark specs as [pending](http://onsi.github.io/ginkgo/#pending-specs) - - [Focus](http://onsi.github.io/ginkgo/#focused-specs) individual specs, and groups of specs, either programmatically or on the command line - - Run your tests in [random order](http://onsi.github.io/ginkgo/#spec-permutation), and then reuse random seeds to replicate the same order. 
- - Break up your test suite into parallel processes for straightforward [test parallelization](http://onsi.github.io/ginkgo/#parallel-specs) + - Mark specs as [pending](https://onsi.github.io/ginkgo/#pending-specs) + - [Focus](https://onsi.github.io/ginkgo/#focused-specs) individual specs, and groups of specs, either programmatically or on the command line + - Run your tests in [random order](https://onsi.github.io/ginkgo/#spec-permutation), and then reuse random seeds to replicate the same order. + - Break up your test suite into parallel processes for straightforward [test parallelization](https://onsi.github.io/ginkgo/#parallel-specs) -- `ginkgo`: a command line interface with plenty of handy command line arguments for [running your tests](http://onsi.github.io/ginkgo/#running-tests) and [generating](http://onsi.github.io/ginkgo/#generators) test files. Here are a few choice examples: +- `ginkgo`: a command line interface with plenty of handy command line arguments for [running your tests](https://onsi.github.io/ginkgo/#running-tests) and [generating](https://onsi.github.io/ginkgo/#generators) test files. Here are a few choice examples: - `ginkgo -nodes=N` runs your tests in `N` parallel processes and print out coherent output in realtime - `ginkgo -cover` runs your tests using Go's code coverage tool - `ginkgo convert` converts an XUnit-style `testing` package to a Ginkgo-style package @@ -37,37 +53,59 @@ If you have a question, comment, bug report, feature request, etc. please open a - `ginkgo watch` [watches](https://onsi.github.io/ginkgo/#watching-for-changes) packages *and their dependencies* for changes, then reruns tests. Run tests immediately as you develop! -- Built-in support for testing [asynchronicity](http://onsi.github.io/ginkgo/#asynchronous-tests) +- Built-in support for testing [asynchronicity](https://onsi.github.io/ginkgo/#asynchronous-tests) -- Built-in support for [benchmarking](http://onsi.github.io/ginkgo/#benchmark-tests) your code. Control the number of benchmark samples as you gather runtimes and other, arbitrary, bits of numerical information about your code. +- Built-in support for [benchmarking](https://onsi.github.io/ginkgo/#benchmark-tests) your code. Control the number of benchmark samples as you gather runtimes and other, arbitrary, bits of numerical information about your code. - [Completions for Sublime Text](https://github.com/onsi/ginkgo-sublime-completions): just use [Package Control](https://sublime.wbond.net/) to install `Ginkgo Completions`. - [Completions for VSCode](https://github.com/onsi/vscode-ginkgo): just use VSCode's extension installer to install `vscode-ginkgo`. -- Straightforward support for third-party testing libraries such as [Gomock](https://code.google.com/p/gomock/) and [Testify](https://github.com/stretchr/testify). Check out the [docs](http://onsi.github.io/ginkgo/#third-party-integrations) for details. +- Straightforward support for third-party testing libraries such as [Gomock](https://code.google.com/p/gomock/) and [Testify](https://github.com/stretchr/testify). Check out the [docs](https://onsi.github.io/ginkgo/#third-party-integrations) for details. - A modular architecture that lets you easily: - - Write [custom reporters](http://onsi.github.io/ginkgo/#writing-custom-reporters) (for example, Ginkgo comes with a [JUnit XML reporter](http://onsi.github.io/ginkgo/#generating-junit-xml-output) and a TeamCity reporter). 
- - [Adapt an existing matcher library (or write your own!)](http://onsi.github.io/ginkgo/#using-other-matcher-libraries) to work with Ginkgo + - Write [custom reporters](https://onsi.github.io/ginkgo/#writing-custom-reporters) (for example, Ginkgo comes with a [JUnit XML reporter](https://onsi.github.io/ginkgo/#generating-junit-xml-output) and a TeamCity reporter). + - [Adapt an existing matcher library (or write your own!)](https://onsi.github.io/ginkgo/#using-other-matcher-libraries) to work with Ginkgo -## [Gomega](http://github.com/onsi/gomega): Ginkgo's Preferred Matcher Library +## [Gomega](https://github.com/onsi/gomega): Ginkgo's Preferred Matcher Library -Ginkgo is best paired with Gomega. Learn more about Gomega [here](http://onsi.github.io/gomega/) +Ginkgo is best paired with Gomega. Learn more about Gomega [here](https://onsi.github.io/gomega/) -## [Agouti](http://github.com/sclevine/agouti): A Go Acceptance Testing Framework +## [Agouti](https://github.com/sclevine/agouti): A Go Acceptance Testing Framework -Agouti allows you run WebDriver integration tests. Learn more about Agouti [here](http://agouti.org) +Agouti allows you run WebDriver integration tests. Learn more about Agouti [here](https://agouti.org) -## Set Me Up! +## Getting Started -You'll need the Go command-line tools. Ginkgo is tested with Go 1.6+, but preferably you should get the latest. Follow the [installation instructions](https://golang.org/doc/install) if you don't have it installed. +You'll need the Go command-line tools. Follow the [installation instructions](https://golang.org/doc/install) if you don't have it installed. +### Global installation +To install the Ginkgo command line interface: ```bash +go get -u github.com/onsi/ginkgo/ginkgo +``` +Note that this will install it to `$GOBIN`, which will need to be in the `$PATH` (or equivalent). Run `go help install` for more information. + +### Go module ["tools package"](https://github.com/golang/go/issues/25922): +Create (or update) a file called `tools/tools.go` with the following contents: +```go +// +build tools -go get -u github.com/onsi/ginkgo/ginkgo # installs the ginkgo CLI -go get -u github.com/onsi/gomega/... # fetches the matcher library +package tools +import ( + _ "github.com/onsi/ginkgo/ginkgo" +) + +// This file imports packages that are used when running go generate, or used +// during the development process but not otherwise depended on by built code. +``` +The Ginkgo command can then be run via `go run github.com/onsi/ginkgo/ginkgo`. +This approach allows the version of Ginkgo to be maintained under source control for reproducible results, +and is well suited to automated test pipelines. + +### Bootstrapping +```bash cd path/to/package/you/want/to/test ginkgo bootstrap # set up a new ginkgo suite @@ -87,16 +125,16 @@ With that said, it's great to know what your options are :) ### What Go gives you out of the box -Testing is a first class citizen in Go, however Go's built-in testing primitives are somewhat limited: The [testing](http://golang.org/pkg/testing) package provides basic XUnit style tests and no assertion library. +Testing is a first class citizen in Go, however Go's built-in testing primitives are somewhat limited: The [testing](https://golang.org/pkg/testing) package provides basic XUnit style tests and no assertion library. ### Matcher libraries for Go's XUnit style tests A number of matcher libraries have been written to augment Go's built-in XUnit style tests. 
Here are two that have gained traction: - [testify](https://github.com/stretchr/testify) -- [gocheck](http://labix.org/gocheck) +- [gocheck](https://labix.org/gocheck) -You can also use Ginkgo's matcher library [Gomega](https://github.com/onsi/gomega) in [XUnit style tests](http://onsi.github.io/gomega/#using-gomega-with-golangs-xunitstyle-tests) +You can also use Ginkgo's matcher library [Gomega](https://github.com/onsi/gomega) in [XUnit style tests](https://onsi.github.io/gomega/#using-gomega-with-golangs-xunitstyle-tests) ### BDD style testing frameworks diff --git a/vendor/github.com/onsi/ginkgo/config/config.go b/vendor/github.com/onsi/ginkgo/config/config.go index ac55a5ad2..feef2bcd6 100644 --- a/vendor/github.com/onsi/ginkgo/config/config.go +++ b/vendor/github.com/onsi/ginkgo/config/config.go @@ -1,7 +1,7 @@ /* Ginkgo accepts a number of configuration options. -These are documented [here](http://onsi.github.io/ginkgo/#the_ginkgo_cli) +These are documented [here](http://onsi.github.io/ginkgo/#the-ginkgo-cli) You can also learn more via @@ -20,7 +20,7 @@ import ( "fmt" ) -const VERSION = "1.10.1" +const VERSION = "1.14.1" type GinkgoConfigType struct { RandomSeed int64 @@ -53,6 +53,7 @@ type DefaultReporterConfigType struct { Verbose bool FullTrace bool ReportPassed bool + ReportFile string } var DefaultReporterConfig = DefaultReporterConfigType{} @@ -100,6 +101,8 @@ func Flags(flagSet *flag.FlagSet, prefix string, includeParallelFlags bool) { flagSet.BoolVar(&(DefaultReporterConfig.Succinct), prefix+"succinct", false, "If set, default reporter prints out a very succinct report") flagSet.BoolVar(&(DefaultReporterConfig.FullTrace), prefix+"trace", false, "If set, default reporter prints out the full stack trace when a failure occurs") flagSet.BoolVar(&(DefaultReporterConfig.ReportPassed), prefix+"reportPassed", false, "If set, default reporter prints out captured output of passed tests.") + flagSet.StringVar(&(DefaultReporterConfig.ReportFile), prefix+"reportFile", "", "Override the default reporter output file path.") + } func BuildFlagArgs(prefix string, ginkgo GinkgoConfigType, reporter DefaultReporterConfigType) []string { @@ -202,5 +205,9 @@ func BuildFlagArgs(prefix string, ginkgo GinkgoConfigType, reporter DefaultRepor result = append(result, fmt.Sprintf("--%sreportPassed", prefix)) } + if reporter.ReportFile != "" { + result = append(result, fmt.Sprintf("--%sreportFile=%s", prefix, reporter.ReportFile)) + } + return result } diff --git a/vendor/github.com/onsi/ginkgo/extensions/table/table.go b/vendor/github.com/onsi/ginkgo/extensions/table/table.go index ae8ab7d24..4b0027807 100644 --- a/vendor/github.com/onsi/ginkgo/extensions/table/table.go +++ b/vendor/github.com/onsi/ginkgo/extensions/table/table.go @@ -12,7 +12,9 @@ import ( "fmt" "reflect" - "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/internal/codelocation" + "github.com/onsi/ginkgo/internal/global" + "github.com/onsi/ginkgo/types" ) /* @@ -40,9 +42,28 @@ Under the hood, `DescribeTable` simply generates a new Ginkgo `Describe`. Each It's important to understand that the `Describe`s and `It`s are generated at evaluation time (i.e. when Ginkgo constructs the tree of tests and before the tests run). Individual Entries can be focused (with FEntry) or marked pending (with PEntry or XEntry). In addition, the entire table can be focused or marked pending with FDescribeTable and PDescribeTable/XDescribeTable. + +A description function can be passed to Entry in place of the description. 
The function is then fed with the entry parameters to generate the description of the It corresponding to that particular Entry. + +For example: + + describe := func(desc string) func(int, int, bool) string { + return func(x, y int, expected bool) string { + return fmt.Sprintf("%s x=%d y=%d expected:%t", desc, x, y, expected) + } + } + + DescribeTable("a simple table", + func(x int, y int, expected bool) { + Ω(x > y).Should(Equal(expected)) + }, + Entry(describe("x > y"), 1, 0, true), + Entry(describe("x == y"), 0, 0, false), + Entry(describe("x < y"), 0, 1, false), + ) */ func DescribeTable(description string, itBody interface{}, entries ...TableEntry) bool { - describeTable(description, itBody, entries, false, false) + describeTable(description, itBody, entries, types.FlagTypeNone) return true } @@ -50,7 +71,7 @@ func DescribeTable(description string, itBody interface{}, entries ...TableEntry You can focus a table with `FDescribeTable`. This is equivalent to `FDescribe`. */ func FDescribeTable(description string, itBody interface{}, entries ...TableEntry) bool { - describeTable(description, itBody, entries, false, true) + describeTable(description, itBody, entries, types.FlagTypeFocused) return true } @@ -58,7 +79,7 @@ func FDescribeTable(description string, itBody interface{}, entries ...TableEntr You can mark a table as pending with `PDescribeTable`. This is equivalent to `PDescribe`. */ func PDescribeTable(description string, itBody interface{}, entries ...TableEntry) bool { - describeTable(description, itBody, entries, true, false) + describeTable(description, itBody, entries, types.FlagTypePending) return true } @@ -66,33 +87,24 @@ func PDescribeTable(description string, itBody interface{}, entries ...TableEntr You can mark a table as pending with `XDescribeTable`. This is equivalent to `XDescribe`. */ func XDescribeTable(description string, itBody interface{}, entries ...TableEntry) bool { - describeTable(description, itBody, entries, true, false) + describeTable(description, itBody, entries, types.FlagTypePending) return true } -func describeTable(description string, itBody interface{}, entries []TableEntry, pending bool, focused bool) { +func describeTable(description string, itBody interface{}, entries []TableEntry, flag types.FlagType) { itBodyValue := reflect.ValueOf(itBody) if itBodyValue.Kind() != reflect.Func { panic(fmt.Sprintf("DescribeTable expects a function, got %#v", itBody)) } - if pending { - ginkgo.PDescribe(description, func() { + global.Suite.PushContainerNode( + description, + func() { for _, entry := range entries { entry.generateIt(itBodyValue) } - }) - } else if focused { - ginkgo.FDescribe(description, func() { - for _, entry := range entries { - entry.generateIt(itBodyValue) - } - }) - } else { - ginkgo.Describe(description, func() { - for _, entry := range entries { - entry.generateIt(itBodyValue) - } - }) - } + }, + flag, + codelocation.New(2), + ) } diff --git a/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go b/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go index 5fa645bce..4d9c237ad 100644 --- a/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go +++ b/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go @@ -1,52 +1,76 @@ package table import ( + "fmt" "reflect" - "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/internal/codelocation" + "github.com/onsi/ginkgo/internal/global" + "github.com/onsi/ginkgo/types" ) /* TableEntry represents an entry in a table test. You generally use the `Entry` constructor. 
*/ type TableEntry struct { - Description string - Parameters []interface{} - Pending bool - Focused bool + Description interface{} + Parameters []interface{} + Pending bool + Focused bool + codeLocation types.CodeLocation } func (t TableEntry) generateIt(itBody reflect.Value) { - if t.Pending { - ginkgo.PIt(t.Description) - return - } - - values := []reflect.Value{} - for i, param := range t.Parameters { - var value reflect.Value - - if param == nil { - inType := itBody.Type().In(i) - value = reflect.Zero(inType) - } else { - value = reflect.ValueOf(param) + var description string + descriptionValue := reflect.ValueOf(t.Description) + switch descriptionValue.Kind() { + case reflect.String: + description = descriptionValue.String() + case reflect.Func: + values := castParameters(descriptionValue, t.Parameters) + res := descriptionValue.Call(values) + if len(res) != 1 { + panic(fmt.Sprintf("The describe function should return only a value, returned %d", len(res))) + } + if res[0].Kind() != reflect.String { + panic(fmt.Sprintf("The describe function should return a string, returned %#v", res[0])) } + description = res[0].String() + default: + panic(fmt.Sprintf("Description can either be a string or a function, got %#v", descriptionValue)) + } - values = append(values, value) + if t.Pending { + global.Suite.PushItNode(description, func() {}, types.FlagTypePending, t.codeLocation, 0) + return } + values := castParameters(itBody, t.Parameters) body := func() { itBody.Call(values) } if t.Focused { - ginkgo.FIt(t.Description, body) + global.Suite.PushItNode(description, body, types.FlagTypeFocused, t.codeLocation, global.DefaultTimeout) } else { - ginkgo.It(t.Description, body) + global.Suite.PushItNode(description, body, types.FlagTypeNone, t.codeLocation, global.DefaultTimeout) } } +func castParameters(function reflect.Value, parameters []interface{}) []reflect.Value { + res := make([]reflect.Value, len(parameters)) + funcType := function.Type() + for i, param := range parameters { + if param == nil { + inType := funcType.In(i) + res[i] = reflect.Zero(inType) + } else { + res[i] = reflect.ValueOf(param) + } + } + return res +} + /* Entry constructs a TableEntry. @@ -55,27 +79,51 @@ Subsequent parameters are saved off and sent to the callback passed in to `Descr Each Entry ends up generating an individual Ginkgo It. */ -func Entry(description string, parameters ...interface{}) TableEntry { - return TableEntry{description, parameters, false, false} +func Entry(description interface{}, parameters ...interface{}) TableEntry { + return TableEntry{ + Description: description, + Parameters: parameters, + Pending: false, + Focused: false, + codeLocation: codelocation.New(1), + } } /* You can focus a particular entry with FEntry. This is equivalent to FIt. */ -func FEntry(description string, parameters ...interface{}) TableEntry { - return TableEntry{description, parameters, false, true} +func FEntry(description interface{}, parameters ...interface{}) TableEntry { + return TableEntry{ + Description: description, + Parameters: parameters, + Pending: false, + Focused: true, + codeLocation: codelocation.New(1), + } } /* You can mark a particular entry as pending with PEntry. This is equivalent to PIt. 
*/ -func PEntry(description string, parameters ...interface{}) TableEntry { - return TableEntry{description, parameters, true, false} +func PEntry(description interface{}, parameters ...interface{}) TableEntry { + return TableEntry{ + Description: description, + Parameters: parameters, + Pending: true, + Focused: false, + codeLocation: codelocation.New(1), + } } /* You can mark a particular entry as pending with XEntry. This is equivalent to XIt. */ -func XEntry(description string, parameters ...interface{}) TableEntry { - return TableEntry{description, parameters, true, false} +func XEntry(description interface{}, parameters ...interface{}) TableEntry { + return TableEntry{ + Description: description, + Parameters: parameters, + Pending: true, + Focused: false, + codeLocation: codelocation.New(1), + } } diff --git a/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go b/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go index 8734c061d..30ff86f59 100644 --- a/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go +++ b/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go @@ -22,9 +22,8 @@ import ( "github.com/onsi/ginkgo/config" "github.com/onsi/ginkgo/internal/codelocation" - "github.com/onsi/ginkgo/internal/failer" + "github.com/onsi/ginkgo/internal/global" "github.com/onsi/ginkgo/internal/remote" - "github.com/onsi/ginkgo/internal/suite" "github.com/onsi/ginkgo/internal/testingtproxy" "github.com/onsi/ginkgo/internal/writer" "github.com/onsi/ginkgo/reporters" @@ -46,16 +45,10 @@ To circumvent this, you should call at the top of the goroutine that caused this panic. ` -const defaultTimeout = 1 - -var globalSuite *suite.Suite -var globalFailer *failer.Failer func init() { config.Flags(flag.CommandLine, "ginkgo", true) GinkgoWriter = writer.New(os.Stdout) - globalFailer = failer.New() - globalSuite = suite.New(globalFailer) } //GinkgoWriter implements an io.Writer @@ -156,7 +149,7 @@ type GinkgoTestDescription struct { //CurrentGinkgoTestDescripton returns information about the current running test. 
func CurrentGinkgoTestDescription() GinkgoTestDescription { - summary, ok := globalSuite.CurrentRunningSpecSummary() + summary, ok := global.Suite.CurrentRunningSpecSummary() if !ok { return GinkgoTestDescription{} } @@ -199,6 +192,11 @@ type Benchmarker interface { // ginkgo bootstrap func RunSpecs(t GinkgoTestingT, description string) bool { specReporters := []Reporter{buildDefaultReporter()} + if config.DefaultReporterConfig.ReportFile != "" { + reportFile := config.DefaultReporterConfig.ReportFile + specReporters[0] = reporters.NewJUnitReporter(reportFile) + return RunSpecsWithDefaultAndCustomReporters(t, description, specReporters) + } return RunSpecsWithCustomReporters(t, description, specReporters) } @@ -218,7 +216,7 @@ func RunSpecsWithCustomReporters(t GinkgoTestingT, description string, specRepor for i, reporter := range specReporters { reporters[i] = reporter } - passed, hasFocusedTests := globalSuite.Run(t, description, reporters, writer, config.GinkgoConfig) + passed, hasFocusedTests := global.Suite.Run(t, description, reporters, writer, config.GinkgoConfig) if passed && hasFocusedTests && strings.TrimSpace(os.Getenv("GINKGO_EDITOR_INTEGRATION")) == "" { fmt.Println("PASS | FOCUSED") os.Exit(types.GINKGO_FOCUS_EXIT_CODE) @@ -247,7 +245,7 @@ func Skip(message string, callerSkip ...int) { skip = callerSkip[0] } - globalFailer.Skip(message, codelocation.New(skip+1)) + global.Failer.Skip(message, codelocation.New(skip+1)) panic(GINKGO_PANIC) } @@ -258,7 +256,7 @@ func Fail(message string, callerSkip ...int) { skip = callerSkip[0] } - globalFailer.Fail(message, codelocation.New(skip+1)) + global.Failer.Fail(message, codelocation.New(skip+1)) panic(GINKGO_PANIC) } @@ -275,7 +273,7 @@ func Fail(message string, callerSkip ...int) { func GinkgoRecover() { e := recover() if e != nil { - globalFailer.Panic(codelocation.New(1), e) + global.Failer.Panic(codelocation.New(1), e) } } @@ -286,25 +284,25 @@ func GinkgoRecover() { //equivalent. The difference is purely semantic -- you typically Describe the behavior of an object //or method and, within that Describe, outline a number of Contexts and Whens. func Describe(text string, body func()) bool { - globalSuite.PushContainerNode(text, body, types.FlagTypeNone, codelocation.New(1)) + global.Suite.PushContainerNode(text, body, types.FlagTypeNone, codelocation.New(1)) return true } //You can focus the tests within a describe block using FDescribe func FDescribe(text string, body func()) bool { - globalSuite.PushContainerNode(text, body, types.FlagTypeFocused, codelocation.New(1)) + global.Suite.PushContainerNode(text, body, types.FlagTypeFocused, codelocation.New(1)) return true } //You can mark the tests within a describe block as pending using PDescribe func PDescribe(text string, body func()) bool { - globalSuite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1)) + global.Suite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1)) return true } //You can mark the tests within a describe block as pending using XDescribe func XDescribe(text string, body func()) bool { - globalSuite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1)) + global.Suite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1)) return true } @@ -315,25 +313,25 @@ func XDescribe(text string, body func()) bool { //equivalent. The difference is purely semantic -- you typical Describe the behavior of an object //or method and, within that Describe, outline a number of Contexts and Whens. 
func Context(text string, body func()) bool { - globalSuite.PushContainerNode(text, body, types.FlagTypeNone, codelocation.New(1)) + global.Suite.PushContainerNode(text, body, types.FlagTypeNone, codelocation.New(1)) return true } //You can focus the tests within a describe block using FContext func FContext(text string, body func()) bool { - globalSuite.PushContainerNode(text, body, types.FlagTypeFocused, codelocation.New(1)) + global.Suite.PushContainerNode(text, body, types.FlagTypeFocused, codelocation.New(1)) return true } //You can mark the tests within a describe block as pending using PContext func PContext(text string, body func()) bool { - globalSuite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1)) + global.Suite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1)) return true } //You can mark the tests within a describe block as pending using XContext func XContext(text string, body func()) bool { - globalSuite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1)) + global.Suite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1)) return true } @@ -344,25 +342,25 @@ func XContext(text string, body func()) bool { //equivalent. The difference is purely semantic -- you typical Describe the behavior of an object //or method and, within that Describe, outline a number of Contexts and Whens. func When(text string, body func()) bool { - globalSuite.PushContainerNode("when "+text, body, types.FlagTypeNone, codelocation.New(1)) + global.Suite.PushContainerNode("when "+text, body, types.FlagTypeNone, codelocation.New(1)) return true } //You can focus the tests within a describe block using FWhen func FWhen(text string, body func()) bool { - globalSuite.PushContainerNode("when "+text, body, types.FlagTypeFocused, codelocation.New(1)) + global.Suite.PushContainerNode("when "+text, body, types.FlagTypeFocused, codelocation.New(1)) return true } //You can mark the tests within a describe block as pending using PWhen func PWhen(text string, body func()) bool { - globalSuite.PushContainerNode("when "+text, body, types.FlagTypePending, codelocation.New(1)) + global.Suite.PushContainerNode("when "+text, body, types.FlagTypePending, codelocation.New(1)) return true } //You can mark the tests within a describe block as pending using XWhen func XWhen(text string, body func()) bool { - globalSuite.PushContainerNode("when "+text, body, types.FlagTypePending, codelocation.New(1)) + global.Suite.PushContainerNode("when "+text, body, types.FlagTypePending, codelocation.New(1)) return true } @@ -372,25 +370,25 @@ func XWhen(text string, body func()) bool { //Ginkgo will normally run It blocks synchronously. To perform asynchronous tests, pass a //function that accepts a Done channel. When you do this, you can also provide an optional timeout. 
func It(text string, body interface{}, timeout ...float64) bool { - globalSuite.PushItNode(text, body, types.FlagTypeNone, codelocation.New(1), parseTimeout(timeout...)) + global.Suite.PushItNode(text, body, types.FlagTypeNone, codelocation.New(1), parseTimeout(timeout...)) return true } //You can focus individual Its using FIt func FIt(text string, body interface{}, timeout ...float64) bool { - globalSuite.PushItNode(text, body, types.FlagTypeFocused, codelocation.New(1), parseTimeout(timeout...)) + global.Suite.PushItNode(text, body, types.FlagTypeFocused, codelocation.New(1), parseTimeout(timeout...)) return true } //You can mark Its as pending using PIt func PIt(text string, _ ...interface{}) bool { - globalSuite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0) + global.Suite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0) return true } //You can mark Its as pending using XIt func XIt(text string, _ ...interface{}) bool { - globalSuite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0) + global.Suite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0) return true } @@ -398,25 +396,25 @@ func XIt(text string, _ ...interface{}) bool { //which "It" does not fit into a natural sentence flow. All the same protocols apply for Specify blocks //which apply to It blocks. func Specify(text string, body interface{}, timeout ...float64) bool { - globalSuite.PushItNode(text, body, types.FlagTypeNone, codelocation.New(1), parseTimeout(timeout...)) + global.Suite.PushItNode(text, body, types.FlagTypeNone, codelocation.New(1), parseTimeout(timeout...)) return true } //You can focus individual Specifys using FSpecify func FSpecify(text string, body interface{}, timeout ...float64) bool { - globalSuite.PushItNode(text, body, types.FlagTypeFocused, codelocation.New(1), parseTimeout(timeout...)) + global.Suite.PushItNode(text, body, types.FlagTypeFocused, codelocation.New(1), parseTimeout(timeout...)) return true } //You can mark Specifys as pending using PSpecify func PSpecify(text string, is ...interface{}) bool { - globalSuite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0) + global.Suite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0) return true } //You can mark Specifys as pending using XSpecify func XSpecify(text string, is ...interface{}) bool { - globalSuite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0) + global.Suite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0) return true } @@ -447,25 +445,25 @@ func By(text string, callbacks ...func()) { //The body function must have the signature: // func(b Benchmarker) func Measure(text string, body interface{}, samples int) bool { - globalSuite.PushMeasureNode(text, body, types.FlagTypeNone, codelocation.New(1), samples) + global.Suite.PushMeasureNode(text, body, types.FlagTypeNone, codelocation.New(1), samples) return true } //You can focus individual Measures using FMeasure func FMeasure(text string, body interface{}, samples int) bool { - globalSuite.PushMeasureNode(text, body, types.FlagTypeFocused, codelocation.New(1), samples) + global.Suite.PushMeasureNode(text, body, types.FlagTypeFocused, codelocation.New(1), samples) return true } //You can mark Measurements as pending using PMeasure func PMeasure(text string, _ ...interface{}) bool { - globalSuite.PushMeasureNode(text, func(b Benchmarker) {}, types.FlagTypePending, codelocation.New(1), 
0) + global.Suite.PushMeasureNode(text, func(b Benchmarker) {}, types.FlagTypePending, codelocation.New(1), 0) return true } //You can mark Measurements as pending using XMeasure func XMeasure(text string, _ ...interface{}) bool { - globalSuite.PushMeasureNode(text, func(b Benchmarker) {}, types.FlagTypePending, codelocation.New(1), 0) + global.Suite.PushMeasureNode(text, func(b Benchmarker) {}, types.FlagTypePending, codelocation.New(1), 0) return true } @@ -476,7 +474,7 @@ func XMeasure(text string, _ ...interface{}) bool { // //You may only register *one* BeforeSuite handler per test suite. You typically do so in your bootstrap file at the top level. func BeforeSuite(body interface{}, timeout ...float64) bool { - globalSuite.SetBeforeSuiteNode(body, codelocation.New(1), parseTimeout(timeout...)) + global.Suite.SetBeforeSuiteNode(body, codelocation.New(1), parseTimeout(timeout...)) return true } @@ -489,7 +487,7 @@ func BeforeSuite(body interface{}, timeout ...float64) bool { // //You may only register *one* AfterSuite handler per test suite. You typically do so in your bootstrap file at the top level. func AfterSuite(body interface{}, timeout ...float64) bool { - globalSuite.SetAfterSuiteNode(body, codelocation.New(1), parseTimeout(timeout...)) + global.Suite.SetAfterSuiteNode(body, codelocation.New(1), parseTimeout(timeout...)) return true } @@ -534,7 +532,7 @@ func AfterSuite(body interface{}, timeout ...float64) bool { // Ω(err).ShouldNot(HaveOccurred()) // }) func SynchronizedBeforeSuite(node1Body interface{}, allNodesBody interface{}, timeout ...float64) bool { - globalSuite.SetSynchronizedBeforeSuiteNode( + global.Suite.SetSynchronizedBeforeSuiteNode( node1Body, allNodesBody, codelocation.New(1), @@ -561,7 +559,7 @@ func SynchronizedBeforeSuite(node1Body interface{}, allNodesBody interface{}, ti // dbRunner.Stop() // }) func SynchronizedAfterSuite(allNodesBody interface{}, node1Body interface{}, timeout ...float64) bool { - globalSuite.SetSynchronizedAfterSuiteNode( + global.Suite.SetSynchronizedAfterSuiteNode( allNodesBody, node1Body, codelocation.New(1), @@ -576,7 +574,7 @@ func SynchronizedAfterSuite(allNodesBody interface{}, node1Body interface{}, tim //Like It blocks, BeforeEach blocks can be made asynchronous by providing a body function that accepts //a Done channel func BeforeEach(body interface{}, timeout ...float64) bool { - globalSuite.PushBeforeEachNode(body, codelocation.New(1), parseTimeout(timeout...)) + global.Suite.PushBeforeEachNode(body, codelocation.New(1), parseTimeout(timeout...)) return true } @@ -586,7 +584,7 @@ func BeforeEach(body interface{}, timeout ...float64) bool { //Like It blocks, BeforeEach blocks can be made asynchronous by providing a body function that accepts //a Done channel func JustBeforeEach(body interface{}, timeout ...float64) bool { - globalSuite.PushJustBeforeEachNode(body, codelocation.New(1), parseTimeout(timeout...)) + global.Suite.PushJustBeforeEachNode(body, codelocation.New(1), parseTimeout(timeout...)) return true } @@ -596,7 +594,7 @@ func JustBeforeEach(body interface{}, timeout ...float64) bool { //Like It blocks, JustAfterEach blocks can be made asynchronous by providing a body function that accepts //a Done channel func JustAfterEach(body interface{}, timeout ...float64) bool { - globalSuite.PushJustAfterEachNode(body, codelocation.New(1), parseTimeout(timeout...)) + global.Suite.PushJustAfterEachNode(body, codelocation.New(1), parseTimeout(timeout...)) return true } @@ -606,13 +604,13 @@ func JustAfterEach(body 
interface{}, timeout ...float64) bool { //Like It blocks, AfterEach blocks can be made asynchronous by providing a body function that accepts //a Done channel func AfterEach(body interface{}, timeout ...float64) bool { - globalSuite.PushAfterEachNode(body, codelocation.New(1), parseTimeout(timeout...)) + global.Suite.PushAfterEachNode(body, codelocation.New(1), parseTimeout(timeout...)) return true } func parseTimeout(timeout ...float64) time.Duration { if len(timeout) == 0 { - return time.Duration(defaultTimeout * int64(time.Second)) + return global.DefaultTimeout } else { return time.Duration(timeout[0] * float64(time.Second)) } diff --git a/vendor/github.com/onsi/ginkgo/go.mod b/vendor/github.com/onsi/ginkgo/go.mod new file mode 100644 index 000000000..1f7125228 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/go.mod @@ -0,0 +1,11 @@ +module github.com/onsi/ginkgo + +require ( + github.com/fsnotify/fsnotify v1.4.9 // indirect + github.com/nxadm/tail v1.4.4 + github.com/onsi/gomega v1.10.1 + golang.org/x/sys v0.0.0-20200519105757-fe76b779f299 + golang.org/x/text v0.3.2 // indirect +) + +go 1.13 diff --git a/vendor/github.com/onsi/ginkgo/go.sum b/vendor/github.com/onsi/ginkgo/go.sum new file mode 100644 index 000000000..2b774f3e8 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/go.sum @@ -0,0 +1,67 @@ +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/gomega v1.7.1 h1:K0jcRCwNQM3vFGh1ppMtDh/+7ApJrjldlX8fA0jDTLQ= +github.com/onsi/gomega v1.7.1/go.mod 
h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7 h1:AeiKBIuRw3UomYXSbLy0Mc2dDLfdtbT/IVn4keq83P0= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e h1:N7DeIrjYszNmSW409R3frPPwglRwMkXSBzwVbkOjLLA= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299 h1:DYfZAGf2WMFjMxbgTjaC+2HC7NkNAQs+6Q8b9WEB/F4= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7 
h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/onsi/ginkgo/internal/global/init.go b/vendor/github.com/onsi/ginkgo/internal/global/init.go new file mode 100644 index 000000000..109f617a5 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/global/init.go @@ -0,0 +1,22 @@ +package global + +import ( + "time" + + "github.com/onsi/ginkgo/internal/failer" + "github.com/onsi/ginkgo/internal/suite" +) + +const DefaultTimeout = time.Duration(1 * time.Second) + +var Suite *suite.Suite +var Failer *failer.Failer + +func init() { + InitializeGlobals() +} + +func InitializeGlobals() { + Failer = failer.New() + Suite = suite.New(Failer) +} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go b/vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go index f9ab30067..992437d9e 100644 --- a/vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go +++ b/vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go @@ -197,11 +197,11 @@ func (aggregator *Aggregator) announceSpec(specSummary *types.SpecSummary) { switch specSummary.State { case types.SpecStatePassed: if specSummary.IsMeasurement { - aggregator.stenographer.AnnounceSuccesfulMeasurement(specSummary, aggregator.config.Succinct) + aggregator.stenographer.AnnounceSuccessfulMeasurement(specSummary, aggregator.config.Succinct) } else if specSummary.RunTime.Seconds() >= aggregator.config.SlowSpecThreshold { - aggregator.stenographer.AnnounceSuccesfulSlowSpec(specSummary, aggregator.config.Succinct) + aggregator.stenographer.AnnounceSuccessfulSlowSpec(specSummary, aggregator.config.Succinct) } else { - aggregator.stenographer.AnnounceSuccesfulSpec(specSummary) + aggregator.stenographer.AnnounceSuccessfulSpec(specSummary) } case types.SpecStatePending: diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_darwin.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_darwin.go new file mode 100644 index 000000000..e3d09eadb --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_darwin.go @@ -0,0 +1,11 @@ +// +build darwin + +package remote + +import ( + "golang.org/x/sys/unix" +) + +func interceptorDupx(oldfd int, newfd int) { + unix.Dup2(oldfd, newfd) +} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_dragonfly.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_dragonfly.go new file mode 100644 index 000000000..72d38686a --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_dragonfly.go @@ -0,0 +1,11 @@ +// +build dragonfly + +package remote + +import ( + "golang.org/x/sys/unix" +) + +func interceptorDupx(oldfd int, newfd int) { + unix.Dup2(oldfd, newfd) +} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_freebsd.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_freebsd.go new file mode 100644 index 
000000000..497d548d9 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_freebsd.go @@ -0,0 +1,11 @@ +// +build freebsd + +package remote + +import ( + "golang.org/x/sys/unix" +) + +func interceptorDupx(oldfd int, newfd int) { + unix.Dup2(oldfd, newfd) +} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_linux.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_linux.go new file mode 100644 index 000000000..29add0d33 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_linux.go @@ -0,0 +1,12 @@ +// +build linux +// +build !mips64le + +package remote + +import ( + "golang.org/x/sys/unix" +) + +func interceptorDupx(oldfd int, newfd int) { + unix.Dup2(oldfd, newfd) +} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_linux_mips64le.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_linux_mips64le.go new file mode 100644 index 000000000..09bd06260 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_linux_mips64le.go @@ -0,0 +1,12 @@ +// +build linux +// +build mips64le + +package remote + +import ( + "golang.org/x/sys/unix" +) + +func interceptorDupx(oldfd int, newfd int) { + unix.Dup3(oldfd, newfd, 0) +} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_netbsd.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_netbsd.go new file mode 100644 index 000000000..16ad6aeb2 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_netbsd.go @@ -0,0 +1,11 @@ +// +build netbsd + +package remote + +import ( + "golang.org/x/sys/unix" +) + +func interceptorDupx(oldfd int, newfd int) { + unix.Dup2(oldfd, newfd) +} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_openbsd.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_openbsd.go new file mode 100644 index 000000000..4275f8421 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_openbsd.go @@ -0,0 +1,11 @@ +// +build openbsd + +package remote + +import ( + "golang.org/x/sys/unix" +) + +func interceptorDupx(oldfd int, newfd int) { + unix.Dup2(oldfd, newfd) +} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_solaris.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_solaris.go new file mode 100644 index 000000000..882a38a9e --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_solaris.go @@ -0,0 +1,11 @@ +// +build solaris + +package remote + +import ( + "golang.org/x/sys/unix" +) + +func interceptorDupx(oldfd int, newfd int) { + unix.Dup2(oldfd, newfd) +} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go index ab6622a29..80614d0ce 100644 --- a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go +++ b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go @@ -7,7 +7,7 @@ import ( "io/ioutil" "os" - "github.com/hpcloud/tail" + "github.com/nxadm/tail" ) func NewOutputInterceptor() OutputInterceptor { @@ -35,12 +35,8 @@ func (interceptor *outputInterceptor) StartInterceptingOutput() error { return err } - // Call a function in ./syscall_dup_*.go - // If building for everything other than linux_arm64, - // use a "normal" syscall.Dup2(oldfd, newfd) call. 
If building for linux_arm64 (which doesn't have syscall.Dup2) - // call syscall.Dup3(oldfd, newfd, 0). They are nearly identical, see: http://linux.die.net/man/2/dup3 - syscallDup(int(interceptor.redirectFile.Fd()), 1) - syscallDup(int(interceptor.redirectFile.Fd()), 2) + interceptorDupx(int(interceptor.redirectFile.Fd()), 1) + interceptorDupx(int(interceptor.redirectFile.Fd()), 2) if interceptor.streamTarget != nil { interceptor.tailer, _ = tail.TailFile(interceptor.redirectFile.Name(), tail.Config{Follow: true}) diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_linux_arm64.go b/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_linux_arm64.go deleted file mode 100644 index 9550d37b3..000000000 --- a/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_linux_arm64.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build linux,arm64 - -package remote - -import "syscall" - -// linux_arm64 doesn't have syscall.Dup2 which ginkgo uses, so -// use the nearly identical syscall.Dup3 instead -func syscallDup(oldfd int, newfd int) (err error) { - return syscall.Dup3(oldfd, newfd, 0) -} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_solaris.go b/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_solaris.go deleted file mode 100644 index 75ef7fb78..000000000 --- a/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_solaris.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build solaris - -package remote - -import "golang.org/x/sys/unix" - -func syscallDup(oldfd int, newfd int) (err error) { - return unix.Dup2(oldfd, newfd) -} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_unix.go b/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_unix.go deleted file mode 100644 index ef6255960..000000000 --- a/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_unix.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build !linux !arm64 -// +build !windows -// +build !solaris - -package remote - -import "syscall" - -func syscallDup(oldfd int, newfd int) (err error) { - return syscall.Dup2(oldfd, newfd) -} diff --git a/vendor/github.com/onsi/ginkgo/internal/suite/suite.go b/vendor/github.com/onsi/ginkgo/internal/suite/suite.go index 3104bbc88..e75da1f89 100644 --- a/vendor/github.com/onsi/ginkgo/internal/suite/suite.go +++ b/vendor/github.com/onsi/ginkgo/internal/suite/suite.go @@ -22,25 +22,37 @@ type ginkgoTestingT interface { Fail() } +type deferredContainerNode struct { + text string + body func() + flag types.FlagType + codeLocation types.CodeLocation +} + type Suite struct { topLevelContainer *containernode.ContainerNode currentContainer *containernode.ContainerNode - containerIndex int - beforeSuiteNode leafnodes.SuiteNode - afterSuiteNode leafnodes.SuiteNode - runner *specrunner.SpecRunner - failer *failer.Failer - running bool + + deferredContainerNodes []deferredContainerNode + + containerIndex int + beforeSuiteNode leafnodes.SuiteNode + afterSuiteNode leafnodes.SuiteNode + runner *specrunner.SpecRunner + failer *failer.Failer + running bool + expandTopLevelNodes bool } func New(failer *failer.Failer) *Suite { topLevelContainer := containernode.New("[Top Level]", types.FlagTypeNone, types.CodeLocation{}) return &Suite{ - topLevelContainer: topLevelContainer, - currentContainer: topLevelContainer, - failer: failer, - containerIndex: 1, + topLevelContainer: topLevelContainer, + currentContainer: topLevelContainer, + failer: failer, + containerIndex: 1, + deferredContainerNodes: []deferredContainerNode{}, } } @@ -53,6 +65,11 @@ func (suite 
*Suite) Run(t ginkgoTestingT, description string, reporters []report panic("ginkgo.parallel.node is one-indexed and must be <= ginkgo.parallel.total") } + suite.expandTopLevelNodes = true + for _, deferredNode := range suite.deferredContainerNodes { + suite.PushContainerNode(deferredNode.text, deferredNode.body, deferredNode.flag, deferredNode.codeLocation) + } + r := rand.New(rand.NewSource(config.RandomSeed)) suite.topLevelContainer.Shuffle(r) iterator, hasProgrammaticFocus := suite.generateSpecsIterator(description, config) @@ -102,6 +119,9 @@ func (suite *Suite) generateSpecsIterator(description string, config config.Gink } func (suite *Suite) CurrentRunningSpecSummary() (*types.SpecSummary, bool) { + if !suite.running { + return nil, false + } return suite.runner.CurrentSpecSummary() } @@ -134,6 +154,23 @@ func (suite *Suite) SetSynchronizedAfterSuiteNode(bodyA interface{}, bodyB inter } func (suite *Suite) PushContainerNode(text string, body func(), flag types.FlagType, codeLocation types.CodeLocation) { + /* + We defer walking the container nodes (which immediately evaluates the `body` function) + until `RunSpecs` is called. We do this by storing off the deferred container nodes. Then, when + `RunSpecs` is called we actually go through and add the container nodes to the test structure. + + This allows us to defer calling all the `body` functions until _after_ the top level functions + have been walked, _after_ func init()s have been called, and _after_ `go test` has called `flag.Parse()`. + + This allows users to load up configuration information in the `TestX` go test hook just before `RunSpecs` + is invoked and solves issues like #693 and makes the lifecycle easier to reason about. + + */ + if !suite.expandTopLevelNodes { + suite.deferredContainerNodes = append(suite.deferredContainerNodes, deferredContainerNode{text, body, flag, codeLocation}) + return + } + container := containernode.New(text, flag, codeLocation) suite.currentContainer.PushContainerNode(container) diff --git a/vendor/github.com/onsi/ginkgo/reporters/default_reporter.go b/vendor/github.com/onsi/ginkgo/reporters/default_reporter.go index c76283b46..f0c9f6141 100644 --- a/vendor/github.com/onsi/ginkgo/reporters/default_reporter.go +++ b/vendor/github.com/onsi/ginkgo/reporters/default_reporter.go @@ -57,11 +57,11 @@ func (reporter *DefaultReporter) SpecDidComplete(specSummary *types.SpecSummary) switch specSummary.State { case types.SpecStatePassed: if specSummary.IsMeasurement { - reporter.stenographer.AnnounceSuccesfulMeasurement(specSummary, reporter.config.Succinct) + reporter.stenographer.AnnounceSuccessfulMeasurement(specSummary, reporter.config.Succinct) } else if specSummary.RunTime.Seconds() >= reporter.config.SlowSpecThreshold { - reporter.stenographer.AnnounceSuccesfulSlowSpec(specSummary, reporter.config.Succinct) + reporter.stenographer.AnnounceSuccessfulSlowSpec(specSummary, reporter.config.Succinct) } else { - reporter.stenographer.AnnounceSuccesfulSpec(specSummary) + reporter.stenographer.AnnounceSuccessfulSpec(specSummary) if reporter.config.ReportPassed { reporter.stenographer.AnnounceCapturedOutput(specSummary.CapturedOutput) } diff --git a/vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go b/vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go index 89a7c8465..963caaaff 100644 --- a/vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go +++ b/vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go @@ -13,6 +13,7 @@ import ( "fmt" "math" "os" + "path/filepath" "strings" 
"github.com/onsi/ginkgo/config" @@ -49,7 +50,7 @@ type JUnitFailureMessage struct { } type JUnitSkipped struct { - XMLName xml.Name `xml:"skipped"` + Message string `xml:",chardata"` } type JUnitReporter struct { @@ -131,6 +132,9 @@ func (reporter *JUnitReporter) SpecDidComplete(specSummary *types.SpecSummary) { } if specSummary.State == types.SpecStateSkipped || specSummary.State == types.SpecStatePending { testCase.Skipped = &JUnitSkipped{} + if specSummary.Failure.Message != "" { + testCase.Skipped.Message = failureMessage(specSummary.Failure) + } } testCase.Time = specSummary.RunTime.Seconds() reporter.suite.TestCases = append(reporter.suite.TestCases, testCase) @@ -141,17 +145,29 @@ func (reporter *JUnitReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) { reporter.suite.Time = math.Trunc(summary.RunTime.Seconds()*1000) / 1000 reporter.suite.Failures = summary.NumberOfFailedSpecs reporter.suite.Errors = 0 - file, err := os.Create(reporter.filename) + if reporter.ReporterConfig.ReportFile != "" { + reporter.filename = reporter.ReporterConfig.ReportFile + fmt.Printf("\nJUnit path was configured: %s\n", reporter.filename) + } + filePath, _ := filepath.Abs(reporter.filename) + dirPath := filepath.Dir(filePath) + err := os.MkdirAll(dirPath, os.ModePerm) if err != nil { - fmt.Printf("Failed to create JUnit report file: %s\n\t%s", reporter.filename, err.Error()) + fmt.Printf("\nFailed to create JUnit directory: %s\n\t%s", filePath, err.Error()) + } + file, err := os.Create(filePath) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to create JUnit report file: %s\n\t%s", filePath, err.Error()) } defer file.Close() file.WriteString(xml.Header) encoder := xml.NewEncoder(file) encoder.Indent(" ", " ") err = encoder.Encode(reporter.suite) - if err != nil { - fmt.Printf("Failed to generate JUnit report\n\t%s", err.Error()) + if err == nil { + fmt.Fprintf(os.Stdout, "\nJUnit report was created: %s\n", filePath) + } else { + fmt.Fprintf(os.Stderr,"\nFailed to generate JUnit report data:\n\t%s", err.Error()) } } diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/fake_stenographer.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/fake_stenographer.go index 98854e7d9..1aa5b9db0 100644 --- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/fake_stenographer.go +++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/fake_stenographer.go @@ -105,16 +105,16 @@ func (stenographer *FakeStenographer) AnnounceCapturedOutput(output string) { stenographer.registerCall("AnnounceCapturedOutput", output) } -func (stenographer *FakeStenographer) AnnounceSuccesfulSpec(spec *types.SpecSummary) { - stenographer.registerCall("AnnounceSuccesfulSpec", spec) +func (stenographer *FakeStenographer) AnnounceSuccessfulSpec(spec *types.SpecSummary) { + stenographer.registerCall("AnnounceSuccessfulSpec", spec) } -func (stenographer *FakeStenographer) AnnounceSuccesfulSlowSpec(spec *types.SpecSummary, succinct bool) { - stenographer.registerCall("AnnounceSuccesfulSlowSpec", spec, succinct) +func (stenographer *FakeStenographer) AnnounceSuccessfulSlowSpec(spec *types.SpecSummary, succinct bool) { + stenographer.registerCall("AnnounceSuccessfulSlowSpec", spec, succinct) } -func (stenographer *FakeStenographer) AnnounceSuccesfulMeasurement(spec *types.SpecSummary, succinct bool) { - stenographer.registerCall("AnnounceSuccesfulMeasurement", spec, succinct) +func (stenographer *FakeStenographer) AnnounceSuccessfulMeasurement(spec *types.SpecSummary, succinct bool) { + 
stenographer.registerCall("AnnounceSuccessfulMeasurement", spec, succinct) } func (stenographer *FakeStenographer) AnnouncePendingSpec(spec *types.SpecSummary, noisy bool) { diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go index 601c74d66..638d6fbb1 100644 --- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go +++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go @@ -47,9 +47,9 @@ type Stenographer interface { AnnounceCapturedOutput(output string) - AnnounceSuccesfulSpec(spec *types.SpecSummary) - AnnounceSuccesfulSlowSpec(spec *types.SpecSummary, succinct bool) - AnnounceSuccesfulMeasurement(spec *types.SpecSummary, succinct bool) + AnnounceSuccessfulSpec(spec *types.SpecSummary) + AnnounceSuccessfulSlowSpec(spec *types.SpecSummary, succinct bool) + AnnounceSuccessfulMeasurement(spec *types.SpecSummary, succinct bool) AnnouncePendingSpec(spec *types.SpecSummary, noisy bool) AnnounceSkippedSpec(spec *types.SpecSummary, succinct bool, fullTrace bool) @@ -245,12 +245,12 @@ func (s *consoleStenographer) AnnounceCapturedOutput(output string) { s.midBlock() } -func (s *consoleStenographer) AnnounceSuccesfulSpec(spec *types.SpecSummary) { +func (s *consoleStenographer) AnnounceSuccessfulSpec(spec *types.SpecSummary) { s.print(0, s.colorize(greenColor, s.denoter)) s.stream() } -func (s *consoleStenographer) AnnounceSuccesfulSlowSpec(spec *types.SpecSummary, succinct bool) { +func (s *consoleStenographer) AnnounceSuccessfulSlowSpec(spec *types.SpecSummary, succinct bool) { s.printBlockWithMessage( s.colorize(greenColor, "%s [SLOW TEST:%.3f seconds]", s.denoter, spec.RunTime.Seconds()), "", @@ -259,7 +259,7 @@ func (s *consoleStenographer) AnnounceSuccesfulSlowSpec(spec *types.SpecSummary, ) } -func (s *consoleStenographer) AnnounceSuccesfulMeasurement(spec *types.SpecSummary, succinct bool) { +func (s *consoleStenographer) AnnounceSuccessfulMeasurement(spec *types.SpecSummary, succinct bool) { s.printBlockWithMessage( s.colorize(greenColor, "%s [MEASUREMENT]", s.denoter), s.measurementReport(spec, succinct), diff --git a/vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter.go b/vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter.go index c8e27b2a7..84fd8aff8 100644 --- a/vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter.go +++ b/vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter.go @@ -35,7 +35,7 @@ func NewTeamCityReporter(writer io.Writer) *TeamCityReporter { func (reporter *TeamCityReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) { reporter.testSuiteName = escape(summary.SuiteDescription) - fmt.Fprintf(reporter.writer, "%s[testSuiteStarted name='%s']", messageId, reporter.testSuiteName) + fmt.Fprintf(reporter.writer, "%s[testSuiteStarted name='%s']\n", messageId, reporter.testSuiteName) } func (reporter *TeamCityReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) { @@ -49,18 +49,18 @@ func (reporter *TeamCityReporter) AfterSuiteDidRun(setupSummary *types.SetupSumm func (reporter *TeamCityReporter) handleSetupSummary(name string, setupSummary *types.SetupSummary) { if setupSummary.State != types.SpecStatePassed { testName := escape(name) - fmt.Fprintf(reporter.writer, "%s[testStarted name='%s']", messageId, testName) - message := escape(setupSummary.Failure.ComponentCodeLocation.String()) - details := escape(setupSummary.Failure.Message) - fmt.Fprintf(reporter.writer, 
"%s[testFailed name='%s' message='%s' details='%s']", messageId, testName, message, details) + fmt.Fprintf(reporter.writer, "%s[testStarted name='%s']\n", messageId, testName) + message := reporter.failureMessage(setupSummary.Failure) + details := reporter.failureDetails(setupSummary.Failure) + fmt.Fprintf(reporter.writer, "%s[testFailed name='%s' message='%s' details='%s']\n", messageId, testName, message, details) durationInMilliseconds := setupSummary.RunTime.Seconds() * 1000 - fmt.Fprintf(reporter.writer, "%s[testFinished name='%s' duration='%v']", messageId, testName, durationInMilliseconds) + fmt.Fprintf(reporter.writer, "%s[testFinished name='%s' duration='%v']\n", messageId, testName, durationInMilliseconds) } } func (reporter *TeamCityReporter) SpecWillRun(specSummary *types.SpecSummary) { testName := escape(strings.Join(specSummary.ComponentTexts[1:], " ")) - fmt.Fprintf(reporter.writer, "%s[testStarted name='%s']", messageId, testName) + fmt.Fprintf(reporter.writer, "%s[testStarted name='%s']\n", messageId, testName) } func (reporter *TeamCityReporter) SpecDidComplete(specSummary *types.SpecSummary) { @@ -68,23 +68,31 @@ func (reporter *TeamCityReporter) SpecDidComplete(specSummary *types.SpecSummary if reporter.ReporterConfig.ReportPassed && specSummary.State == types.SpecStatePassed { details := escape(specSummary.CapturedOutput) - fmt.Fprintf(reporter.writer, "%s[testPassed name='%s' details='%s']", messageId, testName, details) + fmt.Fprintf(reporter.writer, "%s[testPassed name='%s' details='%s']\n", messageId, testName, details) } if specSummary.State == types.SpecStateFailed || specSummary.State == types.SpecStateTimedOut || specSummary.State == types.SpecStatePanicked { - message := escape(specSummary.Failure.ComponentCodeLocation.String()) - details := escape(specSummary.Failure.Message) - fmt.Fprintf(reporter.writer, "%s[testFailed name='%s' message='%s' details='%s']", messageId, testName, message, details) + message := reporter.failureMessage(specSummary.Failure) + details := reporter.failureDetails(specSummary.Failure) + fmt.Fprintf(reporter.writer, "%s[testFailed name='%s' message='%s' details='%s']\n", messageId, testName, message, details) } if specSummary.State == types.SpecStateSkipped || specSummary.State == types.SpecStatePending { - fmt.Fprintf(reporter.writer, "%s[testIgnored name='%s']", messageId, testName) + fmt.Fprintf(reporter.writer, "%s[testIgnored name='%s']\n", messageId, testName) } durationInMilliseconds := specSummary.RunTime.Seconds() * 1000 - fmt.Fprintf(reporter.writer, "%s[testFinished name='%s' duration='%v']", messageId, testName, durationInMilliseconds) + fmt.Fprintf(reporter.writer, "%s[testFinished name='%s' duration='%v']\n", messageId, testName, durationInMilliseconds) } func (reporter *TeamCityReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) { - fmt.Fprintf(reporter.writer, "%s[testSuiteFinished name='%s']", messageId, reporter.testSuiteName) + fmt.Fprintf(reporter.writer, "%s[testSuiteFinished name='%s']\n", messageId, reporter.testSuiteName) +} + +func (reporter *TeamCityReporter) failureMessage(failure types.SpecFailure) string { + return escape(failure.ComponentCodeLocation.String()) +} + +func (reporter *TeamCityReporter) failureDetails(failure types.SpecFailure) string { + return escape(fmt.Sprintf("%s\n%s", failure.Message, failure.Location.String())) } func escape(output string) string { diff --git a/vendor/github.com/onsi/ginkgo/types/types.go b/vendor/github.com/onsi/ginkgo/types/types.go index e4e32b761..c143e02d8 
100644 --- a/vendor/github.com/onsi/ginkgo/types/types.go +++ b/vendor/github.com/onsi/ginkgo/types/types.go @@ -12,7 +12,7 @@ SuiteSummary represents the a summary of the test suite and is passed to both Reporter.SpecSuiteWillBegin Reporter.SpecSuiteDidEnd -this is unfortunate as these two methods should receive different objects. When running in parallel +this is unfortunate as these two methods should receive different objects. When running in parallel each node does not deterministically know how many specs it will end up running. Unfortunately making such a change would break backward compatibility. diff --git a/vendor/github.com/onsi/gomega/.travis.yml b/vendor/github.com/onsi/gomega/.travis.yml index d147e451d..348e3014c 100644 --- a/vendor/github.com/onsi/gomega/.travis.yml +++ b/vendor/github.com/onsi/gomega/.travis.yml @@ -1,9 +1,11 @@ language: go +arch: + - amd64 + - ppc64le go: - - 1.10.x - - 1.11.x - - 1.12.x + - 1.14.x + - 1.15.x - gotip env: diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md index f67074016..16095fa3c 100644 --- a/vendor/github.com/onsi/gomega/CHANGELOG.md +++ b/vendor/github.com/onsi/gomega/CHANGELOG.md @@ -1,3 +1,63 @@ +## 1.10.5 + +### Fixes +- fix: collections matchers should display type of expectation (#408) [6b4eb5a] +- fix(ContainElements): consistently flatten expected values [073b880] +- fix(ConsistOf): consistently flatten expected values [7266efe] + +## 1.10.4 + +### Fixes +- update golang net library to more recent version without vulnerability (#406) [817a8b9] +- Correct spelling: alloted -> allotted (#403) [0bae715] +- fix a panic in MessageWithDiff with long message (#402) [ea06b9b] + +## 1.10.3 + +### Fixes +- updates golang/x/net to fix vulnerability detected by snyk (#394) [c479356] + +## 1.10.2 + +### Fixes +- Add ExpectWithOffset, EventuallyWithOffset and ConsistentlyWithOffset to WithT (#391) [990941a] + +## 1.10.1 + +### Fixes +- Update dependencies (#389) [9f5eecd] + +## 1.10.0 + +### Features +- Add HaveHTTPStatusMatcher (#378) [f335c94] +- Changed matcher for content-type in VerifyJSONRepresenting (#377) [6024f5b] +- Make ghttp usable with x-unit style tests (#376) [c0be499] +- Implement PanicWith matcher (#381) [f8032b4] + +## 1.9.0 + +### Features +- Add ContainElements matcher (#370) [2f57380] +- Output missing and extra elements in ConsistOf failure message [a31eda7] +- Document method LargestMatching [7c5a280] + +## 1.8.1 + +### Fixes +- Fix unexpected MatchError() behaviour (#375) [8ae7b2f] + +## 1.8.0 + +### Features +- Allow optional description to be lazily evaluated function (#364) [bf64010] +- Support wrapped errors (#359) [0a981cb] + +## 1.7.1 + +### Fixes +- Bump go-yaml version to cover fixed ddos heuristic (#362) [95e431e] + ## 1.7.0 ### Features diff --git a/vendor/github.com/onsi/gomega/format/format.go b/vendor/github.com/onsi/gomega/format/format.go index fae25adce..e59d7d75b 100644 --- a/vendor/github.com/onsi/gomega/format/format.go +++ b/vendor/github.com/onsi/gomega/format/format.go @@ -105,7 +105,13 @@ func MessageWithDiff(actual, message, expected string) string { tabLength := 4 spaceFromMessageToActual := tabLength + len(": ") - len(message) - padding := strings.Repeat(" ", spaceFromMessageToActual+spacesBeforeFormattedMismatch) + "|" + + paddingCount := spaceFromMessageToActual + spacesBeforeFormattedMismatch + if paddingCount < 0 { + return Message(formattedActual, message, formattedExpected) + } + + padding := strings.Repeat(" ", paddingCount) + "|" 
return Message(formattedActual, message+padding, formattedExpected) } diff --git a/vendor/github.com/onsi/gomega/gbytes/io_wrappers.go b/vendor/github.com/onsi/gomega/gbytes/io_wrappers.go index 3caed8769..a41ad6232 100644 --- a/vendor/github.com/onsi/gomega/gbytes/io_wrappers.go +++ b/vendor/github.com/onsi/gomega/gbytes/io_wrappers.go @@ -9,17 +9,17 @@ import ( // ErrTimeout is returned by TimeoutCloser, TimeoutReader, and TimeoutWriter when the underlying Closer/Reader/Writer does not return within the specified timeout var ErrTimeout = errors.New("timeout occurred") -// TimeoutCloser returns an io.Closer that wraps the passed-in io.Closer. If the underlying Closer fails to close within the alloted timeout ErrTimeout is returned. +// TimeoutCloser returns an io.Closer that wraps the passed-in io.Closer. If the underlying Closer fails to close within the allotted timeout ErrTimeout is returned. func TimeoutCloser(c io.Closer, timeout time.Duration) io.Closer { return timeoutReaderWriterCloser{c: c, d: timeout} } -// TimeoutReader returns an io.Reader that wraps the passed-in io.Reader. If the underlying Reader fails to read within the alloted timeout ErrTimeout is returned. +// TimeoutReader returns an io.Reader that wraps the passed-in io.Reader. If the underlying Reader fails to read within the allotted timeout ErrTimeout is returned. func TimeoutReader(r io.Reader, timeout time.Duration) io.Reader { return timeoutReaderWriterCloser{r: r, d: timeout} } -// TimeoutWriter returns an io.Writer that wraps the passed-in io.Writer. If the underlying Writer fails to write within the alloted timeout ErrTimeout is returned. +// TimeoutWriter returns an io.Writer that wraps the passed-in io.Writer. If the underlying Writer fails to write within the allotted timeout ErrTimeout is returned. 
func TimeoutWriter(w io.Writer, timeout time.Duration) io.Writer { return timeoutReaderWriterCloser{w: w, d: timeout} } diff --git a/vendor/github.com/onsi/gomega/go.mod b/vendor/github.com/onsi/gomega/go.mod index 65eedf696..6f853a579 100644 --- a/vendor/github.com/onsi/gomega/go.mod +++ b/vendor/github.com/onsi/gomega/go.mod @@ -1,15 +1,10 @@ module github.com/onsi/gomega +go 1.14 + require ( - github.com/fsnotify/fsnotify v1.4.7 // indirect - github.com/golang/protobuf v1.2.0 - github.com/hpcloud/tail v1.0.0 // indirect - github.com/onsi/ginkgo v1.6.0 - golang.org/x/net v0.0.0-20180906233101-161cd47e91fd - golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f // indirect - golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e // indirect - golang.org/x/text v0.3.0 // indirect - gopkg.in/fsnotify.v1 v1.4.7 // indirect - gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect - gopkg.in/yaml.v2 v2.2.1 + github.com/golang/protobuf v1.4.2 + github.com/onsi/ginkgo v1.12.1 + golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb + gopkg.in/yaml.v2 v2.3.0 ) diff --git a/vendor/github.com/onsi/gomega/go.sum b/vendor/github.com/onsi/gomega/go.sum index b23f6ef02..54eeacd2b 100644 --- a/vendor/github.com/onsi/gomega/go.sum +++ b/vendor/github.com/onsi/gomega/go.sum @@ -2,23 +2,65 @@ github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1 h1:mFwc4LvZ0xpSvDZ3E+k8Yte0hLOMxXUlP+yXtJqkYfQ= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb h1:eBmm0M9fYhWpKZLjQUUKka/LtIxf46G4fxeEz5KJr9U= +golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e h1:o3PsSEY8E4eXWkXrIP9YJALUkVZqzHJT5DOasTyn8Vs= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e h1:N7DeIrjYszNmSW409R3frPPwglRwMkXSBzwVbkOjLLA= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f h1:+Nyd8tzPX9R7BWHguqsrbFdRx3WQ/1ib8I44HXV5yTA= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/tomb.v1 
v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go index b145768cf..1bc5288b8 100644 --- a/vendor/github.com/onsi/gomega/gomega_dsl.go +++ b/vendor/github.com/onsi/gomega/gomega_dsl.go @@ -24,7 +24,7 @@ import ( "github.com/onsi/gomega/types" ) -const GOMEGA_VERSION = "1.7.0" +const GOMEGA_VERSION = "1.10.5" const nilFailHandlerPanic = `You are trying to make an assertion, but Gomega's fail handler is nil. If you're using Ginkgo then you probably forgot to put your assertion in an It(). @@ -252,7 +252,7 @@ func Consistently(actual interface{}, intervals ...interface{}) AsyncAssertion { return ConsistentlyWithOffset(0, actual, intervals...) } -// ConsistentlyWithOffset operates like Consistnetly but takes an additional +// ConsistentlyWithOffset operates like Consistently but takes an additional // initial argument to indicate an offset in the call stack. This is useful when building helper // functions that contain matchers. To learn more, read about `ExpectWithOffset`. func ConsistentlyWithOffset(offset int, actual interface{}, intervals ...interface{}) AsyncAssertion { @@ -293,16 +293,18 @@ func SetDefaultConsistentlyPollingInterval(t time.Duration) { // AsyncAssertion is returned by Eventually and Consistently and polls the actual value passed into Eventually against // the matcher passed to the Should and ShouldNot methods. // -// Both Should and ShouldNot take a variadic optionalDescription argument. This is passed on to -// fmt.Sprintf() and is used to annotate failure messages. This allows you to make your failure messages more -// descriptive. +// Both Should and ShouldNot take a variadic optionalDescription argument. +// This argument allows you to make your failure messages more descriptive. +// If a single argument of type `func() string` is passed, this function will be lazily evaluated if a failure occurs +// and the returned string is used to annotate the failure message. +// Otherwise, this argument is passed on to fmt.Sprintf() and then used to annotate the failure message. // // Both Should and ShouldNot return a boolean that is true if the assertion passed and false if it failed. // // Example: // // Eventually(myChannel).Should(Receive(), "Something should have come down the pipe.") -// Consistently(myChannel).ShouldNot(Receive(), "Nothing should have come down the pipe.") +// Consistently(myChannel).ShouldNot(Receive(), func() string { return "Nothing should have come down the pipe." }) type AsyncAssertion interface { Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool @@ -317,8 +319,11 @@ type GomegaAsyncAssertion = AsyncAssertion // Typically Should/ShouldNot are used with Ω and To/ToNot/NotTo are used with Expect // though this is not enforced. 
// -// All methods take a variadic optionalDescription argument. This is passed on to fmt.Sprintf() -// and is used to annotate failure messages. +// All methods take a variadic optionalDescription argument. +// This argument allows you to make your failure messages more descriptive. +// If a single argument of type `func() string` is passed, this function will be lazily evaluated if a failure occurs +// and the returned string is used to annotate the failure message. +// Otherwise, this argument is passed on to fmt.Sprintf() and then used to annotate the failure message. // // All methods return a bool that is true if the assertion passed and false if it failed. // @@ -371,13 +376,13 @@ func NewGomegaWithT(t types.GomegaTestingT) *GomegaWithT { return NewWithT(t) } -// Expect is used to make assertions. See documentation for Expect. -func (g *WithT) Expect(actual interface{}, extra ...interface{}) Assertion { - return assertion.New(actual, testingtsupport.BuildTestingTGomegaFailWrapper(g.t), 0, extra...) +// ExpectWithOffset is used to make assertions. See documentation for ExpectWithOffset. +func (g *WithT) ExpectWithOffset(offset int, actual interface{}, extra ...interface{}) Assertion { + return assertion.New(actual, testingtsupport.BuildTestingTGomegaFailWrapper(g.t), offset, extra...) } -// Eventually is used to make asynchronous assertions. See documentation for Eventually. -func (g *WithT) Eventually(actual interface{}, intervals ...interface{}) AsyncAssertion { +// EventuallyWithOffset is used to make asynchronous assertions. See documentation for EventuallyWithOffset. +func (g *WithT) EventuallyWithOffset(offset int, actual interface{}, intervals ...interface{}) AsyncAssertion { timeoutInterval := defaultEventuallyTimeout pollingInterval := defaultEventuallyPollingInterval if len(intervals) > 0 { @@ -386,11 +391,11 @@ func (g *WithT) Eventually(actual interface{}, intervals ...interface{}) AsyncAs if len(intervals) > 1 { pollingInterval = toDuration(intervals[1]) } - return asyncassertion.New(asyncassertion.AsyncAssertionTypeEventually, actual, testingtsupport.BuildTestingTGomegaFailWrapper(g.t), timeoutInterval, pollingInterval, 0) + return asyncassertion.New(asyncassertion.AsyncAssertionTypeEventually, actual, testingtsupport.BuildTestingTGomegaFailWrapper(g.t), timeoutInterval, pollingInterval, offset) } -// Consistently is used to make asynchronous assertions. See documentation for Consistently. -func (g *WithT) Consistently(actual interface{}, intervals ...interface{}) AsyncAssertion { +// ConsistentlyWithOffset is used to make asynchronous assertions. See documentation for ConsistentlyWithOffset. +func (g *WithT) ConsistentlyWithOffset(offset int, actual interface{}, intervals ...interface{}) AsyncAssertion { timeoutInterval := defaultConsistentlyDuration pollingInterval := defaultConsistentlyPollingInterval if len(intervals) > 0 { @@ -399,7 +404,22 @@ func (g *WithT) Consistently(actual interface{}, intervals ...interface{}) Async if len(intervals) > 1 { pollingInterval = toDuration(intervals[1]) } - return asyncassertion.New(asyncassertion.AsyncAssertionTypeConsistently, actual, testingtsupport.BuildTestingTGomegaFailWrapper(g.t), timeoutInterval, pollingInterval, 0) + return asyncassertion.New(asyncassertion.AsyncAssertionTypeConsistently, actual, testingtsupport.BuildTestingTGomegaFailWrapper(g.t), timeoutInterval, pollingInterval, offset) +} + +// Expect is used to make assertions. See documentation for Expect. 
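Two consumer-visible changes in this Gomega bump are worth illustrating: Should/ShouldNot (and To/NotTo) now accept a single func() string that is evaluated lazily on failure, and *WithT gains the ...WithOffset helpers. A minimal sketch, assuming a standard Go test; names and values are illustrative:

import (
	"fmt"
	"testing"
	"time"

	"github.com/onsi/gomega"
)

func TestLazyFailureDescriptions(t *testing.T) {
	g := gomega.NewWithT(t)

	// Eager annotation: extra arguments are passed to fmt.Sprintf.
	g.Expect(2+2).To(gomega.Equal(4), "arithmetic broke for input %d", 2)

	// Lazy annotation: a single func() string is evaluated only if the
	// assertion fails, so an expensive message costs nothing on success.
	g.Expect(len("etcd")).To(gomega.Equal(4), func() string {
		return fmt.Sprintf("length check failed at %s", time.Now().Format(time.RFC3339))
	})

	// The new offset-aware helpers on *WithT let test helpers report the caller's line.
	waitForTrue := func(cond func() bool) {
		g.EventuallyWithOffset(1, cond, time.Second, 10*time.Millisecond).Should(gomega.BeTrue())
	}
	waitForTrue(func() bool { return true })
}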
+func (g *WithT) Expect(actual interface{}, extra ...interface{}) Assertion { + return g.ExpectWithOffset(0, actual, extra...) +} + +// Eventually is used to make asynchronous assertions. See documentation for Eventually. +func (g *WithT) Eventually(actual interface{}, intervals ...interface{}) AsyncAssertion { + return g.EventuallyWithOffset(0, actual, intervals...) +} + +// Consistently is used to make asynchronous assertions. See documentation for Consistently. +func (g *WithT) Consistently(actual interface{}, intervals ...interface{}) AsyncAssertion { + return g.ConsistentlyWithOffset(0, actual, intervals...) } func toDuration(input interface{}) time.Duration { @@ -427,3 +447,32 @@ func toDuration(input interface{}) time.Duration { panic(fmt.Sprintf("%v is not a valid interval. Must be time.Duration, parsable duration string or a number.", input)) } + +// Gomega describes the essential Gomega DSL. This interface allows libraries +// to abstract between the standard package-level function implementations +// and alternatives like *WithT. +type Gomega interface { + Expect(actual interface{}, extra ...interface{}) Assertion + Eventually(actual interface{}, intervals ...interface{}) AsyncAssertion + Consistently(actual interface{}, intervals ...interface{}) AsyncAssertion +} + +type globalFailHandlerGomega struct{} + +// DefaultGomega supplies the standard package-level implementation +var Default Gomega = globalFailHandlerGomega{} + +// Expect is used to make assertions. See documentation for Expect. +func (globalFailHandlerGomega) Expect(actual interface{}, extra ...interface{}) Assertion { + return Expect(actual, extra...) +} + +// Eventually is used to make asynchronous assertions. See documentation for Eventually. +func (globalFailHandlerGomega) Eventually(actual interface{}, extra ...interface{}) AsyncAssertion { + return Eventually(actual, extra...) +} + +// Consistently is used to make asynchronous assertions. See documentation for Consistently. +func (globalFailHandlerGomega) Consistently(actual interface{}, extra ...interface{}) AsyncAssertion { + return Consistently(actual, extra...) +} diff --git a/vendor/github.com/onsi/gomega/internal/assertion/assertion.go b/vendor/github.com/onsi/gomega/internal/assertion/assertion.go index 00197b67a..a248298f4 100644 --- a/vendor/github.com/onsi/gomega/internal/assertion/assertion.go +++ b/vendor/github.com/onsi/gomega/internal/assertion/assertion.go @@ -52,16 +52,19 @@ func (assertion *Assertion) buildDescription(optionalDescription ...interface{}) switch len(optionalDescription) { case 0: return "" - default: - return fmt.Sprintf(optionalDescription[0].(string), optionalDescription[1:]...) + "\n" + case 1: + if describe, ok := optionalDescription[0].(func() string); ok { + return describe() + "\n" + } } + return fmt.Sprintf(optionalDescription[0].(string), optionalDescription[1:]...) + "\n" } func (assertion *Assertion) match(matcher types.GomegaMatcher, desiredMatch bool, optionalDescription ...interface{}) bool { matches, err := matcher.Match(assertion.actualInput) - description := assertion.buildDescription(optionalDescription...) assertion.failWrapper.TWithHelper.Helper() if err != nil { + description := assertion.buildDescription(optionalDescription...) 
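The new Gomega interface (with the package-level Default implementation) lets shared test helpers accept either the global DSL or a testing.T-bound *WithT. A small sketch of that pattern; the helper is hypothetical:

import (
	"github.com/onsi/gomega"
)

// expectAllNonEmpty is a hypothetical helper that only depends on the Gomega
// interface, so callers may pass gomega.Default or gomega.NewWithT(t).
func expectAllNonEmpty(g gomega.Gomega, values []string) {
	g.Expect(values).NotTo(gomega.BeEmpty())
	for _, v := range values {
		g.Expect(v).NotTo(gomega.BeEmpty())
	}
}

With gomega.Default the globally registered fail handler is used; with gomega.NewWithT(t) failures are reported on the given *testing.T.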
assertion.failWrapper.Fail(description+err.Error(), 2+assertion.offset) return false } @@ -72,6 +75,7 @@ func (assertion *Assertion) match(matcher types.GomegaMatcher, desiredMatch bool } else { message = matcher.NegatedFailureMessage(assertion.actualInput) } + description := assertion.buildDescription(optionalDescription...) assertion.failWrapper.Fail(description+message, 2+assertion.offset) return false } diff --git a/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion.go b/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion.go index a233e48c0..5204836bf 100644 --- a/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion.go +++ b/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion.go @@ -60,9 +60,12 @@ func (assertion *AsyncAssertion) buildDescription(optionalDescription ...interfa switch len(optionalDescription) { case 0: return "" - default: - return fmt.Sprintf(optionalDescription[0].(string), optionalDescription[1:]...) + "\n" + case 1: + if describe, ok := optionalDescription[0].(func() string); ok { + return describe() + "\n" + } } + return fmt.Sprintf(optionalDescription[0].(string), optionalDescription[1:]...) + "\n" } func (assertion *AsyncAssertion) actualInputIsAFunction() bool { @@ -103,8 +106,6 @@ func (assertion *AsyncAssertion) match(matcher types.GomegaMatcher, desiredMatch timer := time.Now() timeout := time.After(assertion.timeoutInterval) - description := assertion.buildDescription(optionalDescription...) - var matches bool var err error mayChange := true @@ -129,6 +130,7 @@ func (assertion *AsyncAssertion) match(matcher types.GomegaMatcher, desiredMatch } } assertion.failWrapper.TWithHelper.Helper() + description := assertion.buildDescription(optionalDescription...) assertion.failWrapper.Fail(fmt.Sprintf("%s after %.3fs.\n%s%s%s", preamble, time.Since(timer).Seconds(), description, message, errMsg), 3+assertion.offset) } diff --git a/vendor/github.com/onsi/gomega/matchers.go b/vendor/github.com/onsi/gomega/matchers.go index 9ec8893cb..16218d4c5 100644 --- a/vendor/github.com/onsi/gomega/matchers.go +++ b/vendor/github.com/onsi/gomega/matchers.go @@ -306,6 +306,20 @@ func ConsistOf(elements ...interface{}) types.GomegaMatcher { } } +//ContainElements succeeds if actual contains the passed in elements. The ordering of the elements does not matter. +//By default ContainElements() uses Equal() to match the elements, however custom matchers can be passed in instead. Here are some examples: +// +// Expect([]string{"Foo", "FooBar"}).Should(ContainElements("FooBar")) +// Expect([]string{"Foo", "FooBar"}).Should(ContainElements(ContainSubstring("Bar"), "Foo")) +// +//Actual must be an array, slice or map. +//For maps, ContainElements searches through the map's values. +func ContainElements(elements ...interface{}) types.GomegaMatcher { + return &matchers.ContainElementsMatcher{ + Elements: elements, + } +} + //HaveKey succeeds if actual is a map with the passed in key. //By default HaveKey uses Equal() to perform the match, however a //matcher can be passed in instead: @@ -376,6 +390,16 @@ func Panic() types.GomegaMatcher { return &matchers.PanicMatcher{} } +//PanicWith succeeds if actual is a function that, when invoked, panics with a specific value. +//Actual must be a function that takes no arguments and returns no results. 
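ContainElements, added above, succeeds when the actual collection contains the expected elements in any order and, unlike ConsistOf, tolerates extra elements; nested matchers can be mixed in. A quick sketch with illustrative values:

import (
	"testing"

	"github.com/onsi/gomega"
)

func TestContainElements(t *testing.T) {
	g := gomega.NewWithT(t)

	backups := []string{"etcd-main-0", "etcd-main-1", "etcd-events-0"}

	// Plain values are compared with Equal; order does not matter and
	// extra elements in the actual slice are allowed.
	g.Expect(backups).To(gomega.ContainElements("etcd-events-0", "etcd-main-0"))

	// Nested matchers can be mixed in for partial matches.
	g.Expect(backups).To(gomega.ContainElements(gomega.ContainSubstring("events")))

	// Unlike ConsistOf, ContainElements does not require equal lengths.
	g.Expect(backups).NotTo(gomega.ConsistOf("etcd-main-0"))
}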
+// +//By default PanicWith uses Equal() to perform the match, however a +//matcher can be passed in instead: +// Expect(fn).Should(PanicWith(MatchRegexp(`.+Foo$`))) +func PanicWith(expected interface{}) types.GomegaMatcher { + return &matchers.PanicMatcher{Expected: expected} +} + //BeAnExistingFile succeeds if a file exists. //Actual must be a string representing the abs path to the file being checked. func BeAnExistingFile() types.GomegaMatcher { @@ -394,6 +418,15 @@ func BeADirectory() types.GomegaMatcher { return &matchers.BeADirectoryMatcher{} } +//HaveHTTPStatus succeeds if the Status or StatusCode field of an HTTP response matches. +//Actual must be either a *http.Response or *httptest.ResponseRecorder. +//Expected must be either an int or a string. +// Expect(resp).Should(HaveHTTPStatus(http.StatusOK)) // asserts that resp.StatusCode == 200 +// Expect(resp).Should(HaveHTTPStatus("404 Not Found")) // asserts that resp.Status == "404 Not Found" +func HaveHTTPStatus(expected interface{}) types.GomegaMatcher { + return &matchers.HaveHTTPStatusMatcher{Expected: expected} +} + //And succeeds only if all of the given matchers succeed. //The matchers are tried in order, and will fail-fast if one doesn't succeed. // Expect("hi").To(And(HaveLen(2), Equal("hi")) diff --git a/vendor/github.com/onsi/gomega/matchers/consist_of.go b/vendor/github.com/onsi/gomega/matchers/consist_of.go index cbbf61802..e8ef0dee1 100644 --- a/vendor/github.com/onsi/gomega/matchers/consist_of.go +++ b/vendor/github.com/onsi/gomega/matchers/consist_of.go @@ -11,7 +11,9 @@ import ( ) type ConsistOfMatcher struct { - Elements []interface{} + Elements []interface{} + missingElements []interface{} + extraElements []interface{} } func (matcher *ConsistOfMatcher) Match(actual interface{}) (success bool, err error) { @@ -19,44 +21,90 @@ func (matcher *ConsistOfMatcher) Match(actual interface{}) (success bool, err er return false, fmt.Errorf("ConsistOf matcher expects an array/slice/map. 
Got:\n%s", format.Object(actual, 1)) } - elements := matcher.Elements - if len(matcher.Elements) == 1 && isArrayOrSlice(matcher.Elements[0]) { - elements = []interface{}{} - value := reflect.ValueOf(matcher.Elements[0]) - for i := 0; i < value.Len(); i++ { - elements = append(elements, value.Index(i).Interface()) + matchers := matchers(matcher.Elements) + values := valuesOf(actual) + + bipartiteGraph, err := bipartitegraph.NewBipartiteGraph(values, matchers, neighbours) + if err != nil { + return false, err + } + + edges := bipartiteGraph.LargestMatching() + if len(edges) == len(values) && len(edges) == len(matchers) { + return true, nil + } + + var missingMatchers []interface{} + matcher.extraElements, missingMatchers = bipartiteGraph.FreeLeftRight(edges) + matcher.missingElements = equalMatchersToElements(missingMatchers) + + return false, nil +} + +func neighbours(value, matcher interface{}) (bool, error) { + match, err := matcher.(omegaMatcher).Match(value) + return match && err == nil, nil +} + +func equalMatchersToElements(matchers []interface{}) (elements []interface{}) { + for _, matcher := range matchers { + equalMatcher, ok := matcher.(*EqualMatcher) + if ok { + matcher = equalMatcher.Expected } + elements = append(elements, matcher) } + return +} - matchers := []interface{}{} - for _, element := range elements { - matcher, isMatcher := element.(omegaMatcher) +func flatten(elems []interface{}) []interface{} { + if len(elems) != 1 || !isArrayOrSlice(elems[0]) { + return elems + } + + value := reflect.ValueOf(elems[0]) + flattened := make([]interface{}, value.Len()) + for i := 0; i < value.Len(); i++ { + flattened[i] = value.Index(i).Interface() + } + return flattened +} + +func matchers(expectedElems []interface{}) (matchers []interface{}) { + for _, e := range flatten(expectedElems) { + matcher, isMatcher := e.(omegaMatcher) if !isMatcher { - matcher = &EqualMatcher{Expected: element} + matcher = &EqualMatcher{Expected: e} } matchers = append(matchers, matcher) } + return +} - values := matcher.valuesOf(actual) +func presentable(elems []interface{}) interface{} { + elems = flatten(elems) - if len(values) != len(matchers) { - return false, nil + if len(elems) == 0 { + return []interface{}{} } - neighbours := func(v, m interface{}) (bool, error) { - match, err := m.(omegaMatcher).Match(v) - return match && err == nil, nil + sv := reflect.ValueOf(elems) + tt := sv.Index(0).Elem().Type() + for i := 1; i < sv.Len(); i++ { + if sv.Index(i).Elem().Type() != tt { + return elems + } } - bipartiteGraph, err := bipartitegraph.NewBipartiteGraph(values, matchers, neighbours) - if err != nil { - return false, err + ss := reflect.MakeSlice(reflect.SliceOf(tt), sv.Len(), sv.Len()) + for i := 0; i < sv.Len(); i++ { + ss.Index(i).Set(sv.Index(i).Elem()) } - return len(bipartiteGraph.LargestMatching()) == len(values), nil + return ss.Interface() } -func (matcher *ConsistOfMatcher) valuesOf(actual interface{}) []interface{} { +func valuesOf(actual interface{}) []interface{} { value := reflect.ValueOf(actual) values := []interface{}{} if isMap(actual) { @@ -74,9 +122,23 @@ func (matcher *ConsistOfMatcher) valuesOf(actual interface{}) []interface{} { } func (matcher *ConsistOfMatcher) FailureMessage(actual interface{}) (message string) { - return format.Message(actual, "to consist of", matcher.Elements) + message = format.Message(actual, "to consist of", presentable(matcher.Elements)) + message = appendMissingElements(message, matcher.missingElements) + if len(matcher.extraElements) > 0 { + message = 
fmt.Sprintf("%s\nthe extra elements were\n%s", message, + format.Object(presentable(matcher.extraElements), 1)) + } + return +} + +func appendMissingElements(message string, missingElements []interface{}) string { + if len(missingElements) == 0 { + return message + } + return fmt.Sprintf("%s\nthe missing elements were\n%s", message, + format.Object(presentable(missingElements), 1)) } func (matcher *ConsistOfMatcher) NegatedFailureMessage(actual interface{}) (message string) { - return format.Message(actual, "not to consist of", matcher.Elements) + return format.Message(actual, "not to consist of", presentable(matcher.Elements)) } diff --git a/vendor/github.com/onsi/gomega/matchers/contain_elements_matcher.go b/vendor/github.com/onsi/gomega/matchers/contain_elements_matcher.go new file mode 100644 index 000000000..946cd8bea --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/contain_elements_matcher.go @@ -0,0 +1,44 @@ +package matchers + +import ( + "fmt" + + "github.com/onsi/gomega/format" + "github.com/onsi/gomega/matchers/support/goraph/bipartitegraph" +) + +type ContainElementsMatcher struct { + Elements []interface{} + missingElements []interface{} +} + +func (matcher *ContainElementsMatcher) Match(actual interface{}) (success bool, err error) { + if !isArrayOrSlice(actual) && !isMap(actual) { + return false, fmt.Errorf("ContainElements matcher expects an array/slice/map. Got:\n%s", format.Object(actual, 1)) + } + + matchers := matchers(matcher.Elements) + bipartiteGraph, err := bipartitegraph.NewBipartiteGraph(valuesOf(actual), matchers, neighbours) + if err != nil { + return false, err + } + + edges := bipartiteGraph.LargestMatching() + if len(edges) == len(matchers) { + return true, nil + } + + _, missingMatchers := bipartiteGraph.FreeLeftRight(edges) + matcher.missingElements = equalMatchersToElements(missingMatchers) + + return false, nil +} + +func (matcher *ContainElementsMatcher) FailureMessage(actual interface{}) (message string) { + message = format.Message(actual, "to contain elements", presentable(matcher.Elements)) + return appendMissingElements(message, matcher.missingElements) +} + +func (matcher *ContainElementsMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to contain elements", presentable(matcher.Elements)) +} diff --git a/vendor/github.com/onsi/gomega/matchers/have_http_status_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_http_status_matcher.go new file mode 100644 index 000000000..3ce4800b7 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/have_http_status_matcher.go @@ -0,0 +1,42 @@ +package matchers + +import ( + "fmt" + "net/http" + "net/http/httptest" + + "github.com/onsi/gomega/format" +) + +type HaveHTTPStatusMatcher struct { + Expected interface{} +} + +func (matcher *HaveHTTPStatusMatcher) Match(actual interface{}) (success bool, err error) { + var resp *http.Response + switch a := actual.(type) { + case *http.Response: + resp = a + case *httptest.ResponseRecorder: + resp = a.Result() + default: + return false, fmt.Errorf("HaveHTTPStatus matcher expects *http.Response or *httptest.ResponseRecorder. Got:\n%s", format.Object(actual, 1)) + } + + switch e := matcher.Expected.(type) { + case int: + return resp.StatusCode == e, nil + case string: + return resp.Status == e, nil + } + + return false, fmt.Errorf("HaveHTTPStatus matcher must be passed an int or a string. 
Got:\n%s", format.Object(matcher.Expected, 1)) +} + +func (matcher *HaveHTTPStatusMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to have HTTP status", matcher.Expected) +} + +func (matcher *HaveHTTPStatusMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to have HTTP status", matcher.Expected) +} diff --git a/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go index 07499ac95..c8993a86d 100644 --- a/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go @@ -1,6 +1,7 @@ package matchers import ( + "errors" "fmt" "reflect" @@ -21,25 +22,28 @@ func (matcher *MatchErrorMatcher) Match(actual interface{}) (success bool, err e } actualErr := actual.(error) + expected := matcher.Expected - if isError(matcher.Expected) { - return reflect.DeepEqual(actualErr, matcher.Expected), nil + if isError(expected) { + return reflect.DeepEqual(actualErr, expected) || errors.Is(actualErr, expected.(error)), nil } - if isString(matcher.Expected) { - return actualErr.Error() == matcher.Expected, nil + if isString(expected) { + return actualErr.Error() == expected, nil } var subMatcher omegaMatcher var hasSubMatcher bool - if matcher.Expected != nil { - subMatcher, hasSubMatcher = (matcher.Expected).(omegaMatcher) + if expected != nil { + subMatcher, hasSubMatcher = (expected).(omegaMatcher) if hasSubMatcher { return subMatcher.Match(actualErr.Error()) } } - return false, fmt.Errorf("MatchError must be passed an error, string, or Matcher that can match on strings. Got:\n%s", format.Object(matcher.Expected, 1)) + return false, fmt.Errorf( + "MatchError must be passed an error, a string, or a Matcher that can match on strings. Got:\n%s", + format.Object(expected, 1)) } func (matcher *MatchErrorMatcher) FailureMessage(actual interface{}) (message string) { diff --git a/vendor/github.com/onsi/gomega/matchers/panic_matcher.go b/vendor/github.com/onsi/gomega/matchers/panic_matcher.go index 640f4db1a..adc8cee63 100644 --- a/vendor/github.com/onsi/gomega/matchers/panic_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/panic_matcher.go @@ -8,7 +8,8 @@ import ( ) type PanicMatcher struct { - object interface{} + Expected interface{} + object interface{} } func (matcher *PanicMatcher) Match(actual interface{}) (success bool, err error) { @@ -28,7 +29,21 @@ func (matcher *PanicMatcher) Match(actual interface{}) (success bool, err error) defer func() { if e := recover(); e != nil { matcher.object = e - success = true + + if matcher.Expected == nil { + success = true + return + } + + valueMatcher, valueIsMatcher := matcher.Expected.(omegaMatcher) + if !valueIsMatcher { + valueMatcher = &EqualMatcher{Expected: matcher.Expected} + } + + success, err = valueMatcher.Match(e) + if err != nil { + err = fmt.Errorf("PanicMatcher's value matcher failed with:\n%s%s", format.Indent, err.Error()) + } } }() @@ -38,9 +53,62 @@ func (matcher *PanicMatcher) Match(actual interface{}) (success bool, err error) } func (matcher *PanicMatcher) FailureMessage(actual interface{}) (message string) { - return format.Message(actual, "to panic") + if matcher.Expected == nil { + // We wanted any panic to occur, but none did. + return format.Message(actual, "to panic") + } + + if matcher.object == nil { + // We wanted a panic with a specific value to occur, but none did. 
+ switch matcher.Expected.(type) { + case omegaMatcher: + return format.Message(actual, "to panic with a value matching", matcher.Expected) + default: + return format.Message(actual, "to panic with", matcher.Expected) + } + } + + // We got a panic, but the value isn't what we expected. + switch matcher.Expected.(type) { + case omegaMatcher: + return format.Message( + actual, + fmt.Sprintf( + "to panic with a value matching\n%s\nbut panicked with\n%s", + format.Object(matcher.Expected, 1), + format.Object(matcher.object, 1), + ), + ) + default: + return format.Message( + actual, + fmt.Sprintf( + "to panic with\n%s\nbut panicked with\n%s", + format.Object(matcher.Expected, 1), + format.Object(matcher.object, 1), + ), + ) + } } func (matcher *PanicMatcher) NegatedFailureMessage(actual interface{}) (message string) { - return format.Message(actual, fmt.Sprintf("not to panic, but panicked with\n%s", format.Object(matcher.object, 1))) + if matcher.Expected == nil { + // We didn't want any panic to occur, but one did. + return format.Message(actual, fmt.Sprintf("not to panic, but panicked with\n%s", format.Object(matcher.object, 1))) + } + + // We wanted a to ensure a panic with a specific value did not occur, but it did. + switch matcher.Expected.(type) { + case omegaMatcher: + return format.Message( + actual, + fmt.Sprintf( + "not to panic with a value matching\n%s\nbut panicked with\n%s", + format.Object(matcher.Expected, 1), + format.Object(matcher.object, 1), + ), + ) + default: + return format.Message(actual, "not to panic with", matcher.Expected) + } } diff --git a/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go b/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go index 108f28586..830e30827 100644 --- a/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go +++ b/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go @@ -13,13 +13,13 @@ type BipartiteGraph struct { func NewBipartiteGraph(leftValues, rightValues []interface{}, neighbours func(interface{}, interface{}) (bool, error)) (*BipartiteGraph, error) { left := NodeOrderedSet{} - for i := range leftValues { - left = append(left, Node{Id: i}) + for i, v := range leftValues { + left = append(left, Node{ID: i, Value: v}) } right := NodeOrderedSet{} - for j := range rightValues { - right = append(right, Node{Id: j + len(left)}) + for j, v := range rightValues { + right = append(right, Node{ID: j + len(left), Value: v}) } edges := EdgeSet{} @@ -31,10 +31,26 @@ func NewBipartiteGraph(leftValues, rightValues []interface{}, neighbours func(in } if neighbours { - edges = append(edges, Edge{Node1: left[i], Node2: right[j]}) + edges = append(edges, Edge{Node1: left[i].ID, Node2: right[j].ID}) } } } return &BipartiteGraph{left, right, edges}, nil } + +// FreeLeftRight returns left node values and right node values +// of the BipartiteGraph's nodes which are not part of the given edges. 
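PanicWith builds on the extended PanicMatcher: it only passes when the function panics with a specific value, or with a value accepted by a nested matcher. A short sketch; the panicking function is illustrative:

import (
	"testing"

	"github.com/onsi/gomega"
)

func TestPanicWith(t *testing.T) {
	g := gomega.NewWithT(t)

	explode := func() { panic("quorum lost") }

	// Plain Panic() still accepts any panic value.
	g.Expect(explode).To(gomega.Panic())

	// PanicWith pins the panic value, either by equality or via a matcher.
	g.Expect(explode).To(gomega.PanicWith("quorum lost"))
	g.Expect(explode).To(gomega.PanicWith(gomega.MatchRegexp(`quorum`)))
}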
+func (bg *BipartiteGraph) FreeLeftRight(edges EdgeSet) (leftValues, rightValues []interface{}) { + for _, node := range bg.Left { + if edges.Free(node) { + leftValues = append(leftValues, node.Value) + } + } + for _, node := range bg.Right { + if edges.Free(node) { + rightValues = append(rightValues, node.Value) + } + } + return +} diff --git a/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraphmatching.go b/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraphmatching.go index 8181f43a4..1c54edd8f 100644 --- a/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraphmatching.go +++ b/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraphmatching.go @@ -1,9 +1,14 @@ package bipartitegraph -import . "github.com/onsi/gomega/matchers/support/goraph/node" -import . "github.com/onsi/gomega/matchers/support/goraph/edge" -import "github.com/onsi/gomega/matchers/support/goraph/util" - +import ( + . "github.com/onsi/gomega/matchers/support/goraph/edge" + . "github.com/onsi/gomega/matchers/support/goraph/node" + "github.com/onsi/gomega/matchers/support/goraph/util" +) + +// LargestMatching implements the Hopcroft–Karp algorithm taking as input a bipartite graph +// and outputting a maximum cardinality matching, i.e. a set of as many edges as possible +// with the property that no two edges share an endpoint. func (bg *BipartiteGraph) LargestMatching() (matching EdgeSet) { paths := bg.maximalDisjointSLAPCollection(matching) @@ -23,7 +28,7 @@ func (bg *BipartiteGraph) maximalDisjointSLAPCollection(matching EdgeSet) (resul return } - used := make(map[Node]bool) + used := make(map[int]bool) for _, u := range guideLayers[len(guideLayers)-1] { slap, found := bg.findDisjointSLAP(u, matching, guideLayers, used) @@ -43,7 +48,7 @@ func (bg *BipartiteGraph) findDisjointSLAP( start Node, matching EdgeSet, guideLayers []NodeOrderedSet, - used map[Node]bool, + used map[int]bool, ) ([]Edge, bool) { return bg.findDisjointSLAPHelper(start, EdgeSet{}, len(guideLayers)-1, matching, guideLayers, used) } @@ -54,16 +59,16 @@ func (bg *BipartiteGraph) findDisjointSLAPHelper( currentLevel int, matching EdgeSet, guideLayers []NodeOrderedSet, - used map[Node]bool, + used map[int]bool, ) (EdgeSet, bool) { - used[currentNode] = true + used[currentNode.ID] = true if currentLevel == 0 { return currentSLAP, true } for _, nextNode := range guideLayers[currentLevel-1] { - if used[nextNode] { + if used[nextNode.ID] { continue } @@ -84,17 +89,17 @@ func (bg *BipartiteGraph) findDisjointSLAPHelper( currentSLAP = currentSLAP[:len(currentSLAP)-1] } - used[currentNode] = false + used[currentNode.ID] = false return nil, false } func (bg *BipartiteGraph) createSLAPGuideLayers(matching EdgeSet) (guideLayers []NodeOrderedSet) { - used := make(map[Node]bool) + used := make(map[int]bool) currentLayer := NodeOrderedSet{} for _, node := range bg.Left { if matching.Free(node) { - used[node] = true + used[node.ID] = true currentLayer = append(currentLayer, node) } } @@ -113,7 +118,7 @@ func (bg *BipartiteGraph) createSLAPGuideLayers(matching EdgeSet) (guideLayers [ if util.Odd(len(guideLayers)) { for _, leftNode := range lastLayer { for _, rightNode := range bg.Right { - if used[rightNode] { + if used[rightNode.ID] { continue } @@ -123,7 +128,7 @@ func (bg *BipartiteGraph) createSLAPGuideLayers(matching EdgeSet) (guideLayers [ } currentLayer = append(currentLayer, rightNode) - used[rightNode] = true + used[rightNode.ID] = true if 
matching.Free(rightNode) { done = true @@ -133,7 +138,7 @@ func (bg *BipartiteGraph) createSLAPGuideLayers(matching EdgeSet) (guideLayers [ } else { for _, rightNode := range lastLayer { for _, leftNode := range bg.Left { - if used[leftNode] { + if used[leftNode.ID] { continue } @@ -143,7 +148,7 @@ func (bg *BipartiteGraph) createSLAPGuideLayers(matching EdgeSet) (guideLayers [ } currentLayer = append(currentLayer, leftNode) - used[leftNode] = true + used[leftNode.ID] = true } } diff --git a/vendor/github.com/onsi/gomega/matchers/support/goraph/edge/edge.go b/vendor/github.com/onsi/gomega/matchers/support/goraph/edge/edge.go index 4fd15cc06..8c38411b2 100644 --- a/vendor/github.com/onsi/gomega/matchers/support/goraph/edge/edge.go +++ b/vendor/github.com/onsi/gomega/matchers/support/goraph/edge/edge.go @@ -3,15 +3,15 @@ package edge import . "github.com/onsi/gomega/matchers/support/goraph/node" type Edge struct { - Node1 Node - Node2 Node + Node1 int + Node2 int } type EdgeSet []Edge func (ec EdgeSet) Free(node Node) bool { for _, e := range ec { - if e.Node1 == node || e.Node2 == node { + if e.Node1 == node.ID || e.Node2 == node.ID { return false } } @@ -31,7 +31,7 @@ func (ec EdgeSet) Contains(edge Edge) bool { func (ec EdgeSet) FindByNodes(node1, node2 Node) (Edge, bool) { for _, e := range ec { - if (e.Node1 == node1 && e.Node2 == node2) || (e.Node1 == node2 && e.Node2 == node1) { + if (e.Node1 == node1.ID && e.Node2 == node2.ID) || (e.Node1 == node2.ID && e.Node2 == node1.ID) { return e, true } } diff --git a/vendor/github.com/onsi/gomega/matchers/support/goraph/node/node.go b/vendor/github.com/onsi/gomega/matchers/support/goraph/node/node.go index 800c2ea8c..cd597a2f2 100644 --- a/vendor/github.com/onsi/gomega/matchers/support/goraph/node/node.go +++ b/vendor/github.com/onsi/gomega/matchers/support/goraph/node/node.go @@ -1,7 +1,8 @@ package node type Node struct { - Id int + ID int + Value interface{} } type NodeOrderedSet []Node diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go index d463e36d3..0e1b48c03 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/counter.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/counter.go @@ -17,6 +17,7 @@ import ( "errors" "math" "sync/atomic" + "time" dto "github.com/prometheus/client_model/go" ) @@ -42,11 +43,27 @@ type Counter interface { Add(float64) } +// ExemplarAdder is implemented by Counters that offer the option of adding a +// value to the Counter together with an exemplar. Its AddWithExemplar method +// works like the Add method of the Counter interface but also replaces the +// currently saved exemplar (if any) with a new one, created from the provided +// value, the current time as timestamp, and the provided labels. Empty Labels +// will lead to a valid (label-less) exemplar. But if Labels is nil, the current +// exemplar is left in place. AddWithExemplar panics if the value is < 0, if any +// of the provided labels are invalid, or if the provided labels contain more +// than 64 runes in total. +type ExemplarAdder interface { + AddWithExemplar(value float64, exemplar Labels) +} + // CounterOpts is an alias for Opts. See there for doc comments. type CounterOpts Opts // NewCounter creates a new Counter based on the provided CounterOpts. // +// The returned implementation also implements ExemplarAdder. It is safe to +// perform the corresponding type assertion. 
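On the Prometheus side, counters created with NewCounter now also implement ExemplarAdder, so an increment can carry an exemplar such as a trace ID. A sketch with illustrative metric and label names; exemplars only reach Prometheus when the OpenMetrics format is negotiated (see the promhttp changes further down):

import (
	"github.com/prometheus/client_golang/prometheus"
)

var reconciliations = prometheus.NewCounter(prometheus.CounterOpts{
	Name: "druid_reconciliations_total",
	Help: "Total number of etcd reconciliations.",
})

func recordReconciliation(traceID string) {
	// The concrete counter implements ExemplarAdder; the type assertion is
	// documented as safe for counters created via NewCounter.
	if adder, ok := reconciliations.(prometheus.ExemplarAdder); ok && traceID != "" {
		// Nil labels would keep the previous exemplar; valid labels replace it.
		adder.AddWithExemplar(1, prometheus.Labels{"trace_id": traceID})
		return
	}
	reconciliations.Inc()
}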
+// // The returned implementation tracks the counter value in two separate // variables, a float64 and a uint64. The latter is used to track calls of the // Inc method and calls of the Add method with a value that can be represented @@ -61,7 +78,7 @@ func NewCounter(opts CounterOpts) Counter { nil, opts.ConstLabels, ) - result := &counter{desc: desc, labelPairs: desc.constLabelPairs} + result := &counter{desc: desc, labelPairs: desc.constLabelPairs, now: time.Now} result.init(result) // Init self-collection. return result } @@ -78,6 +95,9 @@ type counter struct { desc *Desc labelPairs []*dto.LabelPair + exemplar atomic.Value // Containing nil or a *dto.Exemplar. + + now func() time.Time // To mock out time.Now() for testing. } func (c *counter) Desc() *Desc { @@ -88,6 +108,7 @@ func (c *counter) Add(v float64) { if v < 0 { panic(errors.New("counter cannot decrease in value")) } + ival := uint64(v) if float64(ival) == v { atomic.AddUint64(&c.valInt, ival) @@ -103,6 +124,11 @@ func (c *counter) Add(v float64) { } } +func (c *counter) AddWithExemplar(v float64, e Labels) { + c.Add(v) + c.updateExemplar(v, e) +} + func (c *counter) Inc() { atomic.AddUint64(&c.valInt, 1) } @@ -112,7 +138,23 @@ func (c *counter) Write(out *dto.Metric) error { ival := atomic.LoadUint64(&c.valInt) val := fval + float64(ival) - return populateMetric(CounterValue, val, c.labelPairs, out) + var exemplar *dto.Exemplar + if e := c.exemplar.Load(); e != nil { + exemplar = e.(*dto.Exemplar) + } + + return populateMetric(CounterValue, val, c.labelPairs, exemplar, out) +} + +func (c *counter) updateExemplar(v float64, l Labels) { + if l == nil { + return + } + e, err := newExemplar(v, c.now(), l) + if err != nil { + panic(err) + } + c.exemplar.Store(e) } // CounterVec is a Collector that bundles a set of Counters that all share the @@ -138,7 +180,7 @@ func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec { if len(lvs) != len(desc.variableLabels) { panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs)) } - result := &counter{desc: desc, labelPairs: makeLabelPairs(desc, lvs)} + result := &counter{desc: desc, labelPairs: makeLabelPairs(desc, lvs), now: time.Now} result.init(result) // Init self-collection. return result }), @@ -267,6 +309,8 @@ type CounterFunc interface { // provided function must be concurrency-safe. The function should also honor // the contract for a Counter (values only go up, not down), but compliance will // not be checked. +// +// Check out the ExampleGaugeFunc examples for the similar GaugeFunc. func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc { return newValueFunc(NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go index e3232d79f..2f19f5e1e 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/desc.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go @@ -20,6 +20,7 @@ import ( "strings" "github.com/cespare/xxhash/v2" + //lint:ignore SA1019 Need to keep deprecated package for compatibility. 
"github.com/golang/protobuf/proto" "github.com/prometheus/common/model" diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go index 01977de66..98450125d 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/doc.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/doc.go @@ -84,25 +84,21 @@ // of those four metric types can be found in the Prometheus docs: // https://prometheus.io/docs/concepts/metric_types/ // -// A fifth "type" of metric is Untyped. It behaves like a Gauge, but signals the -// Prometheus server not to assume anything about its type. -// -// In addition to the fundamental metric types Gauge, Counter, Summary, -// Histogram, and Untyped, a very important part of the Prometheus data model is -// the partitioning of samples along dimensions called labels, which results in +// In addition to the fundamental metric types Gauge, Counter, Summary, and +// Histogram, a very important part of the Prometheus data model is the +// partitioning of samples along dimensions called labels, which results in // metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec, -// HistogramVec, and UntypedVec. +// and HistogramVec. // // While only the fundamental metric types implement the Metric interface, both // the metrics and their vector versions implement the Collector interface. A // Collector manages the collection of a number of Metrics, but for convenience, -// a Metric can also “collect itself”. Note that Gauge, Counter, Summary, -// Histogram, and Untyped are interfaces themselves while GaugeVec, CounterVec, -// SummaryVec, HistogramVec, and UntypedVec are not. +// a Metric can also “collect itself”. Note that Gauge, Counter, Summary, and +// Histogram are interfaces themselves while GaugeVec, CounterVec, SummaryVec, +// and HistogramVec are not. // // To create instances of Metrics and their vector versions, you need a suitable -// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, HistogramOpts, or -// UntypedOpts. +// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, or HistogramOpts. // // Custom Collectors and constant Metrics // @@ -118,13 +114,16 @@ // existing numbers into Prometheus Metrics during collection. An own // implementation of the Collector interface is perfect for that. You can create // Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and -// NewConstSummary (and their respective Must… versions). That will happen in -// the Collect method. The Describe method has to return separate Desc -// instances, representative of the “throw-away” metrics to be created later. -// NewDesc comes in handy to create those Desc instances. Alternatively, you -// could return no Desc at all, which will mark the Collector “unchecked”. No -// checks are performed at registration time, but metric consistency will still -// be ensured at scrape time, i.e. any inconsistencies will lead to scrape +// NewConstSummary (and their respective Must… versions). NewConstMetric is used +// for all metric types with just a float64 as their value: Counter, Gauge, and +// a special “type” called Untyped. Use the latter if you are not sure if the +// mirrored metric is a Counter or a Gauge. Creation of the Metric instance +// happens in the Collect method. The Describe method has to return separate +// Desc instances, representative of the “throw-away” metrics to be created +// later. NewDesc comes in handy to create those Desc instances. 
Alternatively, +// you could return no Desc at all, which will mark the Collector “unchecked”. +// No checks are performed at registration time, but metric consistency will +// still be ensured at scrape time, i.e. any inconsistencies will lead to scrape // errors. Thus, with unchecked Collectors, the responsibility to not collect // metrics that lead to inconsistencies in the total scrape result lies with the // implementer of the Collector. While this is not a desirable state, it is diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go index 56d8cc209..d67573f76 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go @@ -123,7 +123,7 @@ func (g *gauge) Sub(val float64) { func (g *gauge) Write(out *dto.Metric) error { val := math.Float64frombits(atomic.LoadUint64(&g.valBits)) - return populateMetric(GaugeValue, val, g.labelPairs, out) + return populateMetric(GaugeValue, val, g.labelPairs, nil, out) } // GaugeVec is a Collector that bundles a set of Gauges that all share the same diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go index dc9247fed..ea05cf429 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go @@ -73,7 +73,7 @@ func NewGoCollector() Collector { nil, nil), gcDesc: NewDesc( "go_gc_duration_seconds", - "A summary of the GC invocation durations.", + "A summary of the pause duration of garbage collection cycles.", nil, nil), goInfoDesc: NewDesc( "go_info", diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go index ac2614d52..d4ea301a3 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -20,7 +20,9 @@ import ( "sort" "sync" "sync/atomic" + "time" + //lint:ignore SA1019 Need to keep deprecated package for compatibility. "github.com/golang/protobuf/proto" dto "github.com/prometheus/client_model/go" @@ -151,6 +153,10 @@ type HistogramOpts struct { // NewHistogram creates a new Histogram based on the provided HistogramOpts. It // panics if the buckets in HistogramOpts are not in strictly increasing order. +// +// The returned implementation also implements ExemplarObserver. It is safe to +// perform the corresponding type assertion. Exemplars are tracked separately +// for each bucket. func NewHistogram(opts HistogramOpts) Histogram { return newHistogram( NewDesc( @@ -188,6 +194,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr upperBounds: opts.Buckets, labelPairs: makeLabelPairs(desc, labelValues), counts: [2]*histogramCounts{{}, {}}, + now: time.Now, } for i, upperBound := range h.upperBounds { if i < len(h.upperBounds)-1 { @@ -205,9 +212,10 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr } } // Finally we know the final length of h.upperBounds and can make buckets - // for both counts: + // for both counts as well as exemplars: h.counts[0].buckets = make([]uint64, len(h.upperBounds)) h.counts[1].buckets = make([]uint64, len(h.upperBounds)) + h.exemplars = make([]atomic.Value, len(h.upperBounds)+1) h.init(h) // Init self-collection. 
return h @@ -254,6 +262,9 @@ type histogram struct { upperBounds []float64 labelPairs []*dto.LabelPair + exemplars []atomic.Value // One more than buckets (to include +Inf), each a *dto.Exemplar. + + now func() time.Time // To mock out time.Now() for testing. } func (h *histogram) Desc() *Desc { @@ -261,36 +272,13 @@ func (h *histogram) Desc() *Desc { } func (h *histogram) Observe(v float64) { - // TODO(beorn7): For small numbers of buckets (<30), a linear search is - // slightly faster than the binary search. If we really care, we could - // switch from one search strategy to the other depending on the number - // of buckets. - // - // Microbenchmarks (BenchmarkHistogramNoLabels): - // 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op - // 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op - // 300 buckets: 154 ns/op linear - binary 61.6 ns/op - i := sort.SearchFloat64s(h.upperBounds, v) - - // We increment h.countAndHotIdx so that the counter in the lower - // 63 bits gets incremented. At the same time, we get the new value - // back, which we can use to find the currently-hot counts. - n := atomic.AddUint64(&h.countAndHotIdx, 1) - hotCounts := h.counts[n>>63] + h.observe(v, h.findBucket(v)) +} - if i < len(h.upperBounds) { - atomic.AddUint64(&hotCounts.buckets[i], 1) - } - for { - oldBits := atomic.LoadUint64(&hotCounts.sumBits) - newBits := math.Float64bits(math.Float64frombits(oldBits) + v) - if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { - break - } - } - // Increment count last as we take it as a signal that the observation - // is complete. - atomic.AddUint64(&hotCounts.count, 1) +func (h *histogram) ObserveWithExemplar(v float64, e Labels) { + i := h.findBucket(v) + h.observe(v, i) + h.updateExemplar(v, i, e) } func (h *histogram) Write(out *dto.Metric) error { @@ -329,6 +317,18 @@ func (h *histogram) Write(out *dto.Metric) error { CumulativeCount: proto.Uint64(cumCount), UpperBound: proto.Float64(upperBound), } + if e := h.exemplars[i].Load(); e != nil { + his.Bucket[i].Exemplar = e.(*dto.Exemplar) + } + } + // If there is an exemplar for the +Inf bucket, we have to add that bucket explicitly. + if e := h.exemplars[len(h.upperBounds)].Load(); e != nil { + b := &dto.Bucket{ + CumulativeCount: proto.Uint64(count), + UpperBound: proto.Float64(math.Inf(1)), + Exemplar: e.(*dto.Exemplar), + } + his.Bucket = append(his.Bucket, b) } out.Histogram = his @@ -352,6 +352,57 @@ func (h *histogram) Write(out *dto.Metric) error { return nil } +// findBucket returns the index of the bucket for the provided value, or +// len(h.upperBounds) for the +Inf bucket. +func (h *histogram) findBucket(v float64) int { + // TODO(beorn7): For small numbers of buckets (<30), a linear search is + // slightly faster than the binary search. If we really care, we could + // switch from one search strategy to the other depending on the number + // of buckets. + // + // Microbenchmarks (BenchmarkHistogramNoLabels): + // 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op + // 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op + // 300 buckets: 154 ns/op linear - binary 61.6 ns/op + return sort.SearchFloat64s(h.upperBounds, v) +} + +// observe is the implementation for Observe without the findBucket part. +func (h *histogram) observe(v float64, bucket int) { + // We increment h.countAndHotIdx so that the counter in the lower + // 63 bits gets incremented. At the same time, we get the new value + // back, which we can use to find the currently-hot counts. 
+ n := atomic.AddUint64(&h.countAndHotIdx, 1) + hotCounts := h.counts[n>>63] + + if bucket < len(h.upperBounds) { + atomic.AddUint64(&hotCounts.buckets[bucket], 1) + } + for { + oldBits := atomic.LoadUint64(&hotCounts.sumBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + v) + if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { + break + } + } + // Increment count last as we take it as a signal that the observation + // is complete. + atomic.AddUint64(&hotCounts.count, 1) +} + +// updateExemplar replaces the exemplar for the provided bucket. With empty +// labels, it's a no-op. It panics if any of the labels is invalid. +func (h *histogram) updateExemplar(v float64, bucket int, l Labels) { + if l == nil { + return + } + e, err := newExemplar(v, h.now(), l) + if err != nil { + panic(err) + } + h.exemplars[bucket].Store(e) +} + // HistogramVec is a Collector that bundles a set of Histograms that all share the // same Desc, but have different values for their variable labels. This is used // if you want to count the same thing partitioned by various dimensions @@ -556,7 +607,7 @@ func NewConstHistogram( } // MustNewConstHistogram is a version of NewConstHistogram that panics where -// NewConstMetric would have returned an error. +// NewConstHistogram would have returned an error. func MustNewConstHistogram( desc *Desc, count uint64, diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go index 0df1eff88..35bd8bde3 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go @@ -17,6 +17,7 @@ import ( "strings" "time" + //lint:ignore SA1019 Need to keep deprecated package for compatibility. "github.com/golang/protobuf/proto" "github.com/prometheus/common/model" diff --git a/vendor/github.com/prometheus/client_golang/prometheus/observer.go b/vendor/github.com/prometheus/client_golang/prometheus/observer.go index 5806cd09e..44128016f 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/observer.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/observer.go @@ -50,3 +50,15 @@ type ObserverVec interface { Collector } + +// ExemplarObserver is implemented by Observers that offer the option of +// observing a value together with an exemplar. Its ObserveWithExemplar method +// works like the Observe method of an Observer but also replaces the currently +// saved exemplar (if any) with a new one, created from the provided value, the +// current time as timestamp, and the provided Labels. Empty Labels will lead to +// a valid (label-less) exemplar. But if Labels is nil, the current exemplar is +// left in place. ObserveWithExemplar panics if any of the provided labels are +// invalid or if the provided labels contain more than 64 runes in total. 
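Histograms get the matching ExemplarObserver: ObserveWithExemplar records the observation and stores the exemplar in the bucket the value falls into. A sketch with illustrative names:

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

var reconcileDuration = prometheus.NewHistogram(prometheus.HistogramOpts{
	Name:    "druid_reconcile_duration_seconds",
	Help:    "Duration of etcd reconciliations.",
	Buckets: prometheus.DefBuckets,
})

func observeReconcile(start time.Time, traceID string) {
	elapsed := time.Since(start).Seconds()
	// Histograms created via NewHistogram also implement ExemplarObserver;
	// each bucket keeps its own most recent exemplar.
	if obs, ok := reconcileDuration.(prometheus.ExemplarObserver); ok && traceID != "" {
		obs.ObserveWithExemplar(elapsed, prometheus.Labels{"trace_id": traceID})
		return
	}
	reconcileDuration.Observe(elapsed)
}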
+type ExemplarObserver interface { + ObserveWithExemplar(value float64, exemplar Labels) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go index e0b935d1f..f973398df 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go @@ -33,18 +33,22 @@ var ( ) type processMemoryCounters struct { - // https://docs.microsoft.com/en-us/windows/desktop/api/psapi/ns-psapi-_process_memory_counters_ex + // System interface description + // https://docs.microsoft.com/en-us/windows/desktop/api/psapi/ns-psapi-process_memory_counters_ex + + // Refer to the Golang internal implementation + // https://golang.org/src/internal/syscall/windows/psapi_windows.go _ uint32 PageFaultCount uint32 - PeakWorkingSetSize uint64 - WorkingSetSize uint64 - QuotaPeakPagedPoolUsage uint64 - QuotaPagedPoolUsage uint64 - QuotaPeakNonPagedPoolUsage uint64 - QuotaNonPagedPoolUsage uint64 - PagefileUsage uint64 - PeakPagefileUsage uint64 - PrivateUsage uint64 + PeakWorkingSetSize uintptr + WorkingSetSize uintptr + QuotaPeakPagedPoolUsage uintptr + QuotaPagedPoolUsage uintptr + QuotaPeakNonPagedPoolUsage uintptr + QuotaNonPagedPoolUsage uintptr + PagefileUsage uintptr + PeakPagefileUsage uintptr + PrivateUsage uintptr } func getProcessMemoryInfo(handle windows.Handle) (processMemoryCounters, error) { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go index d1354b101..5070e72e2 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go @@ -53,12 +53,16 @@ func (r *responseWriterDelegator) Written() int64 { } func (r *responseWriterDelegator) WriteHeader(code int) { + if r.observeWriteHeader != nil && !r.wroteHeader { + // Only call observeWriteHeader for the 1st time. It's a bug if + // WriteHeader is called more than once, but we want to protect + // against it here. Note that we still delegate the WriteHeader + // to the original ResponseWriter to not mask the bug from it. 
+ r.observeWriteHeader(code) + } r.status = code r.wroteHeader = true r.ResponseWriter.WriteHeader(code) - if r.observeWriteHeader != nil { - r.observeWriteHeader(code) - } } func (r *responseWriterDelegator) Write(b []byte) (int, error) { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go index cea5a90fd..5e1c4546c 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go @@ -144,7 +144,12 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler { } } - contentType := expfmt.Negotiate(req.Header) + var contentType expfmt.Format + if opts.EnableOpenMetrics { + contentType = expfmt.NegotiateIncludingOpenMetrics(req.Header) + } else { + contentType = expfmt.Negotiate(req.Header) + } header := rsp.Header() header.Set(contentTypeHeader, string(contentType)) @@ -162,28 +167,40 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler { enc := expfmt.NewEncoder(w, contentType) - var lastErr error - for _, mf := range mfs { - if err := enc.Encode(mf); err != nil { - lastErr = err - if opts.ErrorLog != nil { - opts.ErrorLog.Println("error encoding and sending metric family:", err) - } - errCnt.WithLabelValues("encoding").Inc() - switch opts.ErrorHandling { - case PanicOnError: - panic(err) - case ContinueOnError: - // Handled later. - case HTTPErrorOnError: - httpError(rsp, err) - return - } + // handleError handles the error according to opts.ErrorHandling + // and returns true if we have to abort after the handling. + handleError := func(err error) bool { + if err == nil { + return false + } + if opts.ErrorLog != nil { + opts.ErrorLog.Println("error encoding and sending metric family:", err) + } + errCnt.WithLabelValues("encoding").Inc() + switch opts.ErrorHandling { + case PanicOnError: + panic(err) + case HTTPErrorOnError: + // We cannot really send an HTTP error at this + // point because we most likely have written + // something to rsp already. But at least we can + // stop sending. + return true } + // Do nothing in all other cases, including ContinueOnError. + return false } - if lastErr != nil { - httpError(rsp, lastErr) + for _, mf := range mfs { + if handleError(enc.Encode(mf)) { + return + } + } + if closer, ok := enc.(expfmt.Closer); ok { + // This in particular takes care of the final "# EOF\n" line for OpenMetrics. + if handleError(closer.Close()) { + return + } } }) @@ -255,7 +272,12 @@ type HandlerErrorHandling int // errors are encountered. const ( // Serve an HTTP status code 500 upon the first error - // encountered. Report the error message in the body. + // encountered. Report the error message in the body. Note that HTTP + // errors cannot be served anymore once the beginning of a regular + // payload has been sent. Thus, in the (unlikely) case that encoding the + // payload into the negotiated wire format fails, serving the response + // will simply be aborted. Set an ErrorLog in HandlerOpts to detect + // those errors. HTTPErrorOnError HandlerErrorHandling = iota // Ignore errors and try to serve as many metrics as possible. However, // if no metrics can be served, serve an HTTP status code 500 and the @@ -318,6 +340,16 @@ type HandlerOpts struct { // away). Until the implementation is improved, it is recommended to // implement a separate timeout in potentially slow Collectors. 
Timeout time.Duration + // If true, the experimental OpenMetrics encoding is added to the + // possible options during content negotiation. Note that Prometheus + // 2.5.0+ will negotiate OpenMetrics as first priority. OpenMetrics is + // the only way to transmit exemplars. However, the move to OpenMetrics + // is not completely transparent. Most notably, the values of "quantile" + // labels of Summaries and "le" labels of Histograms are formatted with + // a trailing ".0" if they would otherwise look like integer numbers + // (which changes the identity of the resulting series on the Prometheus + // server). + EnableOpenMetrics bool } // gzipAccepted returns whether the client will accept gzip-encoded content. @@ -334,11 +366,9 @@ func gzipAccepted(header http.Header) bool { } // httpError removes any content-encoding header and then calls http.Error with -// the provided error and http.StatusInternalServerErrer. Error contents is -// supposed to be uncompressed plain text. However, same as with a plain -// http.Error, any header settings will be void if the header has already been -// sent. The error message will still be written to the writer, but it will -// probably be of limited use. +// the provided error and http.StatusInternalServerError. Error contents is +// supposed to be uncompressed plain text. Same as with a plain http.Error, this +// must not be called if the header or any payload has already been sent. func httpError(rsp http.ResponseWriter, err error) { rsp.Header().Del(contentEncodingHeader) http.Error( diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go index c05d6ee1b..ba94405af 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go @@ -26,6 +26,7 @@ import ( "unicode/utf8" "github.com/cespare/xxhash/v2" + //lint:ignore SA1019 Need to keep deprecated package for compatibility. "github.com/golang/protobuf/proto" "github.com/prometheus/common/expfmt" diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go index ae42e761a..f3c1440d1 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go @@ -23,6 +23,7 @@ import ( "time" "github.com/beorn7/perks/quantile" + //lint:ignore SA1019 Need to keep deprecated package for compatibility. "github.com/golang/protobuf/proto" dto "github.com/prometheus/client_model/go" diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go index eb248f108..6206928cc 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/value.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/value.go @@ -16,8 +16,12 @@ package prometheus import ( "fmt" "sort" + "time" + "unicode/utf8" + //lint:ignore SA1019 Need to keep deprecated package for compatibility. "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" dto "github.com/prometheus/client_model/go" ) @@ -25,7 +29,8 @@ import ( // ValueType is an enumeration of metric types that represent a simple value. type ValueType int -// Possible values for the ValueType enum. +// Possible values for the ValueType enum. Use UntypedValue to mark a metric +// with an unknown type. 
const ( _ ValueType = iota CounterValue @@ -69,7 +74,7 @@ func (v *valueFunc) Desc() *Desc { } func (v *valueFunc) Write(out *dto.Metric) error { - return populateMetric(v.valType, v.function(), v.labelPairs, out) + return populateMetric(v.valType, v.function(), v.labelPairs, nil, out) } // NewConstMetric returns a metric with one fixed value that cannot be @@ -116,19 +121,20 @@ func (m *constMetric) Desc() *Desc { } func (m *constMetric) Write(out *dto.Metric) error { - return populateMetric(m.valType, m.val, m.labelPairs, out) + return populateMetric(m.valType, m.val, m.labelPairs, nil, out) } func populateMetric( t ValueType, v float64, labelPairs []*dto.LabelPair, + e *dto.Exemplar, m *dto.Metric, ) error { m.Label = labelPairs switch t { case CounterValue: - m.Counter = &dto.Counter{Value: proto.Float64(v)} + m.Counter = &dto.Counter{Value: proto.Float64(v), Exemplar: e} case GaugeValue: m.Gauge = &dto.Gauge{Value: proto.Float64(v)} case UntypedValue: @@ -160,3 +166,40 @@ func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair { sort.Sort(labelPairSorter(labelPairs)) return labelPairs } + +// ExemplarMaxRunes is the max total number of runes allowed in exemplar labels. +const ExemplarMaxRunes = 64 + +// newExemplar creates a new dto.Exemplar from the provided values. An error is +// returned if any of the label names or values are invalid or if the total +// number of runes in the label names and values exceeds ExemplarMaxRunes. +func newExemplar(value float64, ts time.Time, l Labels) (*dto.Exemplar, error) { + e := &dto.Exemplar{} + e.Value = proto.Float64(value) + tsProto, err := ptypes.TimestampProto(ts) + if err != nil { + return nil, err + } + e.Timestamp = tsProto + labelPairs := make([]*dto.LabelPair, 0, len(l)) + var runes int + for name, value := range l { + if !checkLabelName(name) { + return nil, fmt.Errorf("exemplar label name %q is invalid", name) + } + runes += utf8.RuneCountInString(name) + if !utf8.ValidString(value) { + return nil, fmt.Errorf("exemplar label value %q is not valid UTF-8", value) + } + runes += utf8.RuneCountInString(value) + labelPairs = append(labelPairs, &dto.LabelPair{ + Name: proto.String(name), + Value: proto.String(value), + }) + } + if runes > ExemplarMaxRunes { + return nil, fmt.Errorf("exemplar labels have %d runes, exceeding the limit of %d", runes, ExemplarMaxRunes) + } + e.Label = labelPairs + return e, nil +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go index 19df3fe6b..d53848dc4 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/vec.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go @@ -91,6 +91,18 @@ func (m *metricVec) Delete(labels Labels) bool { return m.metricMap.deleteByHashWithLabels(h, labels, m.curry) } +// Without explicit forwarding of Describe, Collect, Reset, those methods won't +// show up in GoDoc. + +// Describe implements Collector. +func (m *metricVec) Describe(ch chan<- *Desc) { m.metricMap.Describe(ch) } + +// Collect implements Collector. +func (m *metricVec) Collect(ch chan<- Metric) { m.metricMap.Collect(ch) } + +// Reset deletes all metrics in this vector. 
+func (m *metricVec) Reset() { m.metricMap.Reset() } + func (m *metricVec) curryWith(labels Labels) (*metricVec, error) { var ( newCurry []curriedLabelValue diff --git a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go index e303eef6d..438aa5e92 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go @@ -17,6 +17,7 @@ import ( "fmt" "sort" + //lint:ignore SA1019 Need to keep deprecated package for compatibility. "github.com/golang/protobuf/proto" dto "github.com/prometheus/client_model/go" @@ -27,7 +28,8 @@ import ( // registered with the wrapped Registerer in a modified way. The modified // Collector adds the provided Labels to all Metrics it collects (as // ConstLabels). The Metrics collected by the unmodified Collector must not -// duplicate any of those labels. +// duplicate any of those labels. Wrapping a nil value is valid, resulting +// in a no-op Registerer. // // WrapRegistererWith provides a way to add fixed labels to a subset of // Collectors. It should not be used to add fixed labels to all metrics exposed. @@ -50,6 +52,7 @@ func WrapRegistererWith(labels Labels, reg Registerer) Registerer { // Registerer. Collectors registered with the returned Registerer will be // registered with the wrapped Registerer in a modified way. The modified // Collector adds the provided prefix to the name of all Metrics it collects. +// Wrapping a nil value is valid, resulting in a no-op Registerer. // // WrapRegistererWithPrefix is useful to have one place to prefix all metrics of // a sub-system. To make this work, register metrics of the sub-system with the @@ -80,6 +83,9 @@ type wrappingRegisterer struct { } func (r *wrappingRegisterer) Register(c Collector) error { + if r.wrappedRegisterer == nil { + return nil + } return r.wrappedRegisterer.Register(&wrappingCollector{ wrappedCollector: c, prefix: r.prefix, @@ -88,6 +94,9 @@ func (r *wrappingRegisterer) Register(c Collector) error { } func (r *wrappingRegisterer) MustRegister(cs ...Collector) { + if r.wrappedRegisterer == nil { + return + } for _, c := range cs { if err := r.Register(c); err != nil { panic(err) @@ -96,6 +105,9 @@ func (r *wrappingRegisterer) MustRegister(cs ...Collector) { } func (r *wrappingRegisterer) Unregister(c Collector) bool { + if r.wrappedRegisterer == nil { + return false + } return r.wrappedRegisterer.Unregister(&wrappingCollector{ wrappedCollector: c, prefix: r.prefix, diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go index 9805432c2..2f4930d9d 100644 --- a/vendor/github.com/prometheus/client_model/go/metrics.pb.go +++ b/vendor/github.com/prometheus/client_model/go/metrics.pb.go @@ -1,11 +1,14 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: metrics.proto -package io_prometheus_client // import "github.com/prometheus/client_model/go" +package io_prometheus_client -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + timestamp "github.com/golang/protobuf/ptypes/timestamp" + math "math" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -16,7 +19,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. 
// A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package type MetricType int32 @@ -35,6 +38,7 @@ var MetricType_name = map[int32]string{ 3: "UNTYPED", 4: "HISTOGRAM", } + var MetricType_value = map[string]int32{ "COUNTER": 0, "GAUGE": 1, @@ -48,9 +52,11 @@ func (x MetricType) Enum() *MetricType { *p = x return p } + func (x MetricType) String() string { return proto.EnumName(MetricType_name, int32(x)) } + func (x *MetricType) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(MetricType_value, data, "MetricType") if err != nil { @@ -59,8 +65,9 @@ func (x *MetricType) UnmarshalJSON(data []byte) error { *x = MetricType(value) return nil } + func (MetricType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{0} + return fileDescriptor_6039342a2ba47b72, []int{0} } type LabelPair struct { @@ -75,16 +82,17 @@ func (m *LabelPair) Reset() { *m = LabelPair{} } func (m *LabelPair) String() string { return proto.CompactTextString(m) } func (*LabelPair) ProtoMessage() {} func (*LabelPair) Descriptor() ([]byte, []int) { - return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{0} + return fileDescriptor_6039342a2ba47b72, []int{0} } + func (m *LabelPair) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_LabelPair.Unmarshal(m, b) } func (m *LabelPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_LabelPair.Marshal(b, m, deterministic) } -func (dst *LabelPair) XXX_Merge(src proto.Message) { - xxx_messageInfo_LabelPair.Merge(dst, src) +func (m *LabelPair) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelPair.Merge(m, src) } func (m *LabelPair) XXX_Size() int { return xxx_messageInfo_LabelPair.Size(m) @@ -120,16 +128,17 @@ func (m *Gauge) Reset() { *m = Gauge{} } func (m *Gauge) String() string { return proto.CompactTextString(m) } func (*Gauge) ProtoMessage() {} func (*Gauge) Descriptor() ([]byte, []int) { - return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{1} + return fileDescriptor_6039342a2ba47b72, []int{1} } + func (m *Gauge) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Gauge.Unmarshal(m, b) } func (m *Gauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Gauge.Marshal(b, m, deterministic) } -func (dst *Gauge) XXX_Merge(src proto.Message) { - xxx_messageInfo_Gauge.Merge(dst, src) +func (m *Gauge) XXX_Merge(src proto.Message) { + xxx_messageInfo_Gauge.Merge(m, src) } func (m *Gauge) XXX_Size() int { return xxx_messageInfo_Gauge.Size(m) @@ -148,26 +157,28 @@ func (m *Gauge) GetValue() float64 { } type Counter struct { - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar" json:"exemplar,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Counter) Reset() { *m = Counter{} } func (m *Counter) String() string { return proto.CompactTextString(m) } func (*Counter) ProtoMessage() {} func (*Counter) Descriptor() ([]byte, []int) { - return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{2} + return 
fileDescriptor_6039342a2ba47b72, []int{2} } + func (m *Counter) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Counter.Unmarshal(m, b) } func (m *Counter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Counter.Marshal(b, m, deterministic) } -func (dst *Counter) XXX_Merge(src proto.Message) { - xxx_messageInfo_Counter.Merge(dst, src) +func (m *Counter) XXX_Merge(src proto.Message) { + xxx_messageInfo_Counter.Merge(m, src) } func (m *Counter) XXX_Size() int { return xxx_messageInfo_Counter.Size(m) @@ -185,6 +196,13 @@ func (m *Counter) GetValue() float64 { return 0 } +func (m *Counter) GetExemplar() *Exemplar { + if m != nil { + return m.Exemplar + } + return nil +} + type Quantile struct { Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"` Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` @@ -197,16 +215,17 @@ func (m *Quantile) Reset() { *m = Quantile{} } func (m *Quantile) String() string { return proto.CompactTextString(m) } func (*Quantile) ProtoMessage() {} func (*Quantile) Descriptor() ([]byte, []int) { - return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{3} + return fileDescriptor_6039342a2ba47b72, []int{3} } + func (m *Quantile) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Quantile.Unmarshal(m, b) } func (m *Quantile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Quantile.Marshal(b, m, deterministic) } -func (dst *Quantile) XXX_Merge(src proto.Message) { - xxx_messageInfo_Quantile.Merge(dst, src) +func (m *Quantile) XXX_Merge(src proto.Message) { + xxx_messageInfo_Quantile.Merge(m, src) } func (m *Quantile) XXX_Size() int { return xxx_messageInfo_Quantile.Size(m) @@ -244,16 +263,17 @@ func (m *Summary) Reset() { *m = Summary{} } func (m *Summary) String() string { return proto.CompactTextString(m) } func (*Summary) ProtoMessage() {} func (*Summary) Descriptor() ([]byte, []int) { - return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{4} + return fileDescriptor_6039342a2ba47b72, []int{4} } + func (m *Summary) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Summary.Unmarshal(m, b) } func (m *Summary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Summary.Marshal(b, m, deterministic) } -func (dst *Summary) XXX_Merge(src proto.Message) { - xxx_messageInfo_Summary.Merge(dst, src) +func (m *Summary) XXX_Merge(src proto.Message) { + xxx_messageInfo_Summary.Merge(m, src) } func (m *Summary) XXX_Size() int { return xxx_messageInfo_Summary.Size(m) @@ -296,16 +316,17 @@ func (m *Untyped) Reset() { *m = Untyped{} } func (m *Untyped) String() string { return proto.CompactTextString(m) } func (*Untyped) ProtoMessage() {} func (*Untyped) Descriptor() ([]byte, []int) { - return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{5} + return fileDescriptor_6039342a2ba47b72, []int{5} } + func (m *Untyped) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Untyped.Unmarshal(m, b) } func (m *Untyped) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Untyped.Marshal(b, m, deterministic) } -func (dst *Untyped) XXX_Merge(src proto.Message) { - xxx_messageInfo_Untyped.Merge(dst, src) +func (m *Untyped) XXX_Merge(src proto.Message) { + xxx_messageInfo_Untyped.Merge(m, src) } func (m *Untyped) XXX_Size() int { return xxx_messageInfo_Untyped.Size(m) @@ -336,16 +357,17 @@ func (m *Histogram) Reset() { *m = Histogram{} } func (m *Histogram) String() string { return 
proto.CompactTextString(m) } func (*Histogram) ProtoMessage() {} func (*Histogram) Descriptor() ([]byte, []int) { - return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{6} + return fileDescriptor_6039342a2ba47b72, []int{6} } + func (m *Histogram) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Histogram.Unmarshal(m, b) } func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Histogram.Marshal(b, m, deterministic) } -func (dst *Histogram) XXX_Merge(src proto.Message) { - xxx_messageInfo_Histogram.Merge(dst, src) +func (m *Histogram) XXX_Merge(src proto.Message) { + xxx_messageInfo_Histogram.Merge(m, src) } func (m *Histogram) XXX_Size() int { return xxx_messageInfo_Histogram.Size(m) @@ -378,27 +400,29 @@ func (m *Histogram) GetBucket() []*Bucket { } type Bucket struct { - CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"` - UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"` + UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"` + Exemplar *Exemplar `protobuf:"bytes,3,opt,name=exemplar" json:"exemplar,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Bucket) Reset() { *m = Bucket{} } func (m *Bucket) String() string { return proto.CompactTextString(m) } func (*Bucket) ProtoMessage() {} func (*Bucket) Descriptor() ([]byte, []int) { - return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{7} + return fileDescriptor_6039342a2ba47b72, []int{7} } + func (m *Bucket) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Bucket.Unmarshal(m, b) } func (m *Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Bucket.Marshal(b, m, deterministic) } -func (dst *Bucket) XXX_Merge(src proto.Message) { - xxx_messageInfo_Bucket.Merge(dst, src) +func (m *Bucket) XXX_Merge(src proto.Message) { + xxx_messageInfo_Bucket.Merge(m, src) } func (m *Bucket) XXX_Size() int { return xxx_messageInfo_Bucket.Size(m) @@ -423,6 +447,68 @@ func (m *Bucket) GetUpperBound() float64 { return 0 } +func (m *Bucket) GetExemplar() *Exemplar { + if m != nil { + return m.Exemplar + } + return nil +} + +type Exemplar struct { + Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` + Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` + Timestamp *timestamp.Timestamp `protobuf:"bytes,3,opt,name=timestamp" json:"timestamp,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Exemplar) Reset() { *m = Exemplar{} } +func (m *Exemplar) String() string { return proto.CompactTextString(m) } +func (*Exemplar) ProtoMessage() {} +func (*Exemplar) Descriptor() ([]byte, []int) { + return fileDescriptor_6039342a2ba47b72, []int{8} +} + +func (m *Exemplar) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Exemplar.Unmarshal(m, b) +} +func (m *Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Exemplar.Marshal(b, m, deterministic) +} +func (m *Exemplar) XXX_Merge(src 
proto.Message) { + xxx_messageInfo_Exemplar.Merge(m, src) +} +func (m *Exemplar) XXX_Size() int { + return xxx_messageInfo_Exemplar.Size(m) +} +func (m *Exemplar) XXX_DiscardUnknown() { + xxx_messageInfo_Exemplar.DiscardUnknown(m) +} + +var xxx_messageInfo_Exemplar proto.InternalMessageInfo + +func (m *Exemplar) GetLabel() []*LabelPair { + if m != nil { + return m.Label + } + return nil +} + +func (m *Exemplar) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +func (m *Exemplar) GetTimestamp() *timestamp.Timestamp { + if m != nil { + return m.Timestamp + } + return nil +} + type Metric struct { Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"` @@ -440,16 +526,17 @@ func (m *Metric) Reset() { *m = Metric{} } func (m *Metric) String() string { return proto.CompactTextString(m) } func (*Metric) ProtoMessage() {} func (*Metric) Descriptor() ([]byte, []int) { - return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{8} + return fileDescriptor_6039342a2ba47b72, []int{9} } + func (m *Metric) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Metric.Unmarshal(m, b) } func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Metric.Marshal(b, m, deterministic) } -func (dst *Metric) XXX_Merge(src proto.Message) { - xxx_messageInfo_Metric.Merge(dst, src) +func (m *Metric) XXX_Merge(src proto.Message) { + xxx_messageInfo_Metric.Merge(m, src) } func (m *Metric) XXX_Size() int { return xxx_messageInfo_Metric.Size(m) @@ -523,16 +610,17 @@ func (m *MetricFamily) Reset() { *m = MetricFamily{} } func (m *MetricFamily) String() string { return proto.CompactTextString(m) } func (*MetricFamily) ProtoMessage() {} func (*MetricFamily) Descriptor() ([]byte, []int) { - return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{9} + return fileDescriptor_6039342a2ba47b72, []int{10} } + func (m *MetricFamily) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_MetricFamily.Unmarshal(m, b) } func (m *MetricFamily) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_MetricFamily.Marshal(b, m, deterministic) } -func (dst *MetricFamily) XXX_Merge(src proto.Message) { - xxx_messageInfo_MetricFamily.Merge(dst, src) +func (m *MetricFamily) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricFamily.Merge(m, src) } func (m *MetricFamily) XXX_Size() int { return xxx_messageInfo_MetricFamily.Size(m) @@ -572,6 +660,7 @@ func (m *MetricFamily) GetMetric() []*Metric { } func init() { + proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value) proto.RegisterType((*LabelPair)(nil), "io.prometheus.client.LabelPair") proto.RegisterType((*Gauge)(nil), "io.prometheus.client.Gauge") proto.RegisterType((*Counter)(nil), "io.prometheus.client.Counter") @@ -580,50 +669,55 @@ func init() { proto.RegisterType((*Untyped)(nil), "io.prometheus.client.Untyped") proto.RegisterType((*Histogram)(nil), "io.prometheus.client.Histogram") proto.RegisterType((*Bucket)(nil), "io.prometheus.client.Bucket") + proto.RegisterType((*Exemplar)(nil), "io.prometheus.client.Exemplar") proto.RegisterType((*Metric)(nil), "io.prometheus.client.Metric") proto.RegisterType((*MetricFamily)(nil), "io.prometheus.client.MetricFamily") - proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value) } -func init() { proto.RegisterFile("metrics.proto", fileDescriptor_metrics_c97c9a2b9560cb8f) } - -var 
fileDescriptor_metrics_c97c9a2b9560cb8f = []byte{ - // 591 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0x4f, 0x4f, 0xdb, 0x4e, - 0x14, 0xfc, 0x99, 0xd8, 0x09, 0x7e, 0x86, 0x5f, 0xad, 0x15, 0x07, 0xab, 0x2d, 0x25, 0xcd, 0x89, - 0xf6, 0x10, 0x54, 0x04, 0xaa, 0x44, 0xdb, 0x03, 0x50, 0x1a, 0x2a, 0xd5, 0x40, 0x37, 0xc9, 0x81, - 0x5e, 0xac, 0x8d, 0x59, 0x25, 0x56, 0xbd, 0xb6, 0x6b, 0xef, 0x22, 0xe5, 0xdc, 0x43, 0xbf, 0x47, - 0xbf, 0x68, 0xab, 0xfd, 0xe3, 0x18, 0x24, 0xc3, 0xa9, 0xb7, 0xb7, 0xf3, 0x66, 0xde, 0x8e, 0x77, - 0xc7, 0x0b, 0x9b, 0x8c, 0xf2, 0x32, 0x89, 0xab, 0x61, 0x51, 0xe6, 0x3c, 0x47, 0x5b, 0x49, 0x2e, - 0x2b, 0x46, 0xf9, 0x82, 0x8a, 0x6a, 0x18, 0xa7, 0x09, 0xcd, 0xf8, 0xe0, 0x10, 0xdc, 0x2f, 0x64, - 0x46, 0xd3, 0x2b, 0x92, 0x94, 0x08, 0x81, 0x9d, 0x11, 0x46, 0x03, 0xab, 0x6f, 0xed, 0xba, 0x58, - 0xd5, 0x68, 0x0b, 0x9c, 0x5b, 0x92, 0x0a, 0x1a, 0xac, 0x29, 0x50, 0x2f, 0x06, 0xdb, 0xe0, 0x8c, - 0x88, 0x98, 0xdf, 0x69, 0x4b, 0x8d, 0x55, 0xb7, 0x77, 0xa0, 0x77, 0x9a, 0x8b, 0x8c, 0xd3, 0xf2, - 0x01, 0xc2, 0x7b, 0x58, 0xff, 0x2a, 0x48, 0xc6, 0x93, 0x94, 0xa2, 0xa7, 0xb0, 0xfe, 0xc3, 0xd4, - 0x86, 0xb4, 0x5a, 0xdf, 0xdf, 0x7d, 0xa5, 0xfe, 0x65, 0x41, 0x6f, 0x2c, 0x18, 0x23, 0xe5, 0x12, - 0xbd, 0x84, 0x8d, 0x8a, 0xb0, 0x22, 0xa5, 0x51, 0x2c, 0x77, 0x54, 0x13, 0x6c, 0xec, 0x69, 0x4c, - 0x99, 0x40, 0xdb, 0x00, 0x86, 0x52, 0x09, 0x66, 0x26, 0xb9, 0x1a, 0x19, 0x0b, 0x86, 0x8e, 0xee, - 0xec, 0xdf, 0xe9, 0x77, 0x76, 0xbd, 0xfd, 0x17, 0xc3, 0xb6, 0xb3, 0x1a, 0xd6, 0x8e, 0x1b, 0x7f, - 0xf2, 0x43, 0xa7, 0x19, 0x5f, 0x16, 0xf4, 0xe6, 0x81, 0x0f, 0xfd, 0x69, 0x81, 0x7b, 0x9e, 0x54, - 0x3c, 0x9f, 0x97, 0x84, 0xfd, 0x03, 0xb3, 0x07, 0xd0, 0x9d, 0x89, 0xf8, 0x3b, 0xe5, 0xc6, 0xea, - 0xf3, 0x76, 0xab, 0x27, 0x8a, 0x83, 0x0d, 0x77, 0x30, 0x81, 0xae, 0x46, 0xd0, 0x2b, 0xf0, 0x63, - 0xc1, 0x44, 0x4a, 0x78, 0x72, 0x7b, 0xdf, 0xc5, 0x93, 0x06, 0xd7, 0x4e, 0x76, 0xc0, 0x13, 0x45, - 0x41, 0xcb, 0x68, 0x96, 0x8b, 0xec, 0xc6, 0x58, 0x01, 0x05, 0x9d, 0x48, 0x64, 0xf0, 0x67, 0x0d, - 0xba, 0xa1, 0xca, 0x18, 0x3a, 0x04, 0x27, 0x95, 0x31, 0x0a, 0x2c, 0xe5, 0x6a, 0xa7, 0xdd, 0xd5, - 0x2a, 0x69, 0x58, 0xb3, 0xd1, 0x1b, 0x70, 0xe6, 0x32, 0x46, 0x6a, 0xb8, 0xb7, 0xff, 0xac, 0x5d, - 0xa6, 0x92, 0x86, 0x35, 0x13, 0xbd, 0x85, 0x5e, 0xac, 0xa3, 0x15, 0x74, 0x94, 0x68, 0xbb, 0x5d, - 0x64, 0xf2, 0x87, 0x6b, 0xb6, 0x14, 0x56, 0x3a, 0x33, 0x81, 0xfd, 0x98, 0xd0, 0x04, 0x0b, 0xd7, - 0x6c, 0x29, 0x14, 0xfa, 0x8e, 0x03, 0xe7, 0x31, 0xa1, 0x09, 0x02, 0xae, 0xd9, 0xe8, 0x03, 0xb8, - 0x8b, 0xfa, 0xea, 0x83, 0x9e, 0x92, 0x3e, 0x70, 0x30, 0xab, 0x84, 0xe0, 0x46, 0x21, 0xc3, 0xc2, - 0x13, 0x46, 0x2b, 0x4e, 0x58, 0x11, 0xb1, 0x2a, 0xe8, 0xf6, 0xad, 0xdd, 0x0e, 0xf6, 0x56, 0x58, - 0x58, 0x0d, 0x7e, 0x5b, 0xb0, 0xa1, 0x6f, 0xe0, 0x13, 0x61, 0x49, 0xba, 0x6c, 0xfd, 0x83, 0x11, - 0xd8, 0x0b, 0x9a, 0x16, 0xe6, 0x07, 0x56, 0x35, 0x3a, 0x00, 0x5b, 0x7a, 0x54, 0x47, 0xf8, 0xff, - 0x7e, 0xbf, 0xdd, 0x95, 0x9e, 0x3c, 0x59, 0x16, 0x14, 0x2b, 0xb6, 0x0c, 0x9f, 0x7e, 0x53, 0x02, - 0xfb, 0xb1, 0xf0, 0x69, 0x1d, 0x36, 0xdc, 0xd7, 0x21, 0x40, 0x33, 0x09, 0x79, 0xd0, 0x3b, 0xbd, - 0x9c, 0x5e, 0x4c, 0xce, 0xb0, 0xff, 0x1f, 0x72, 0xc1, 0x19, 0x1d, 0x4f, 0x47, 0x67, 0xbe, 0x25, - 0xf1, 0xf1, 0x34, 0x0c, 0x8f, 0xf1, 0xb5, 0xbf, 0x26, 0x17, 0xd3, 0x8b, 0xc9, 0xf5, 0xd5, 0xd9, - 0x47, 0xbf, 0x83, 0x36, 0xc1, 0x3d, 0xff, 0x3c, 0x9e, 0x5c, 0x8e, 0xf0, 0x71, 0xe8, 0xdb, 0x27, - 0x18, 0x5a, 0x5f, 0xb2, 0x6f, 0x47, 0xf3, 0x84, 0x2f, 0xc4, 0x6c, 0x18, 0xe7, 0x6c, 0xaf, 0xe9, - 0xee, 0xe9, 0x6e, 0xc4, 
0xf2, 0x1b, 0x9a, 0xee, 0xcd, 0xf3, 0x77, 0x49, 0x1e, 0x35, 0xdd, 0x48, - 0x77, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x45, 0x21, 0x7f, 0x64, 0x2b, 0x05, 0x00, 0x00, +func init() { proto.RegisterFile("metrics.proto", fileDescriptor_6039342a2ba47b72) } + +var fileDescriptor_6039342a2ba47b72 = []byte{ + // 665 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0xcd, 0x6e, 0xd3, 0x4c, + 0x14, 0xfd, 0xdc, 0x38, 0x3f, 0xbe, 0x69, 0x3f, 0xa2, 0x51, 0x17, 0x56, 0xa1, 0x24, 0x78, 0x55, + 0x58, 0x38, 0xa2, 0x6a, 0x05, 0x2a, 0xb0, 0x68, 0x4b, 0x48, 0x91, 0x48, 0x5b, 0x26, 0xc9, 0xa2, + 0xb0, 0x88, 0x1c, 0x77, 0x70, 0x2c, 0x3c, 0xb1, 0xb1, 0x67, 0x2a, 0xb2, 0x66, 0xc1, 0x16, 0x5e, + 0x81, 0x17, 0x05, 0xcd, 0x8f, 0x6d, 0x2a, 0xb9, 0x95, 0x40, 0xec, 0x66, 0xee, 0x3d, 0xe7, 0xfa, + 0xcc, 0xf8, 0x9c, 0x81, 0x0d, 0x4a, 0x58, 0x1a, 0xfa, 0x99, 0x9b, 0xa4, 0x31, 0x8b, 0xd1, 0x66, + 0x18, 0x8b, 0x15, 0x25, 0x6c, 0x41, 0x78, 0xe6, 0xfa, 0x51, 0x48, 0x96, 0x6c, 0xab, 0x1b, 0xc4, + 0x71, 0x10, 0x91, 0xbe, 0xc4, 0xcc, 0xf9, 0x87, 0x3e, 0x0b, 0x29, 0xc9, 0x98, 0x47, 0x13, 0x45, + 0x73, 0xf6, 0xc1, 0x7a, 0xe3, 0xcd, 0x49, 0x74, 0xee, 0x85, 0x29, 0x42, 0x60, 0x2e, 0x3d, 0x4a, + 0x6c, 0xa3, 0x67, 0xec, 0x58, 0x58, 0xae, 0xd1, 0x26, 0xd4, 0xaf, 0xbc, 0x88, 0x13, 0x7b, 0x4d, + 0x16, 0xd5, 0xc6, 0xd9, 0x86, 0xfa, 0xd0, 0xe3, 0xc1, 0x6f, 0x6d, 0xc1, 0x31, 0xf2, 0xf6, 0x7b, + 0x68, 0x1e, 0xc7, 0x7c, 0xc9, 0x48, 0x5a, 0x0d, 0x40, 0x07, 0xd0, 0x22, 0x9f, 0x09, 0x4d, 0x22, + 0x2f, 0x95, 0x83, 0xdb, 0xbb, 0xf7, 0xdd, 0xaa, 0x03, 0xb8, 0x03, 0x8d, 0xc2, 0x05, 0xde, 0x79, + 0x0e, 0xad, 0xb7, 0xdc, 0x5b, 0xb2, 0x30, 0x22, 0x68, 0x0b, 0x5a, 0x9f, 0xf4, 0x5a, 0x7f, 0xa0, + 0xd8, 0x5f, 0x57, 0x5e, 0x48, 0xfb, 0x6a, 0x40, 0x73, 0xcc, 0x29, 0xf5, 0xd2, 0x15, 0x7a, 0x00, + 0xeb, 0x99, 0x47, 0x93, 0x88, 0xcc, 0x7c, 0xa1, 0x56, 0x4e, 0x30, 0x71, 0x5b, 0xd5, 0xe4, 0x01, + 0xd0, 0x36, 0x80, 0x86, 0x64, 0x9c, 0xea, 0x49, 0x96, 0xaa, 0x8c, 0x39, 0x15, 0xe7, 0x28, 0xbe, + 0x5f, 0xeb, 0xd5, 0x6e, 0x3e, 0x47, 0xae, 0xb8, 0xd4, 0xe7, 0x74, 0xa1, 0x39, 0x5d, 0xb2, 0x55, + 0x42, 0x2e, 0x6f, 0xb8, 0xc5, 0x2f, 0x06, 0x58, 0x27, 0x61, 0xc6, 0xe2, 0x20, 0xf5, 0xe8, 0x3f, + 0x10, 0xbb, 0x07, 0x8d, 0x39, 0xf7, 0x3f, 0x12, 0xa6, 0xa5, 0xde, 0xab, 0x96, 0x7a, 0x24, 0x31, + 0x58, 0x63, 0x9d, 0x6f, 0x06, 0x34, 0x54, 0x09, 0x3d, 0x84, 0x8e, 0xcf, 0x29, 0x8f, 0x3c, 0x16, + 0x5e, 0x5d, 0x97, 0x71, 0xa7, 0xac, 0x2b, 0x29, 0x5d, 0x68, 0xf3, 0x24, 0x21, 0xe9, 0x6c, 0x1e, + 0xf3, 0xe5, 0xa5, 0xd6, 0x02, 0xb2, 0x74, 0x24, 0x2a, 0xd7, 0x1c, 0x50, 0xfb, 0x43, 0x07, 0x7c, + 0x37, 0xa0, 0x95, 0x97, 0xd1, 0x3e, 0xd4, 0x23, 0xe1, 0x60, 0xdb, 0x90, 0x87, 0xea, 0x56, 0x4f, + 0x29, 0x4c, 0x8e, 0x15, 0xba, 0xda, 0x1d, 0xe8, 0x29, 0x58, 0x45, 0x42, 0xb4, 0xac, 0x2d, 0x57, + 0x65, 0xc8, 0xcd, 0x33, 0xe4, 0x4e, 0x72, 0x04, 0x2e, 0xc1, 0xce, 0xcf, 0x35, 0x68, 0x8c, 0x64, + 0x22, 0xff, 0x56, 0xd1, 0x63, 0xa8, 0x07, 0x22, 0x53, 0x3a, 0x10, 0x77, 0xab, 0x69, 0x32, 0x76, + 0x58, 0x21, 0xd1, 0x13, 0x68, 0xfa, 0x2a, 0x67, 0x5a, 0xec, 0x76, 0x35, 0x49, 0x87, 0x11, 0xe7, + 0x68, 0x41, 0xcc, 0x54, 0x08, 0x6c, 0xf3, 0x36, 0xa2, 0x4e, 0x0a, 0xce, 0xd1, 0x82, 0xc8, 0x95, + 0x69, 0xed, 0xfa, 0x6d, 0x44, 0xed, 0x6c, 0x9c, 0xa3, 0xd1, 0x0b, 0xb0, 0x16, 0xb9, 0x97, 0xed, + 0xa6, 0xa4, 0xde, 0x70, 0x31, 0x85, 0xe5, 0x71, 0xc9, 0x10, 0xee, 0x2f, 0xee, 0x7a, 0x46, 0x33, + 0xbb, 0xd1, 0x33, 0x76, 0x6a, 0xb8, 0x5d, 0xd4, 0x46, 0x99, 0xf3, 0xc3, 0x80, 0x75, 0xf5, 0x07, + 0x5e, 0x79, 0x34, 0x8c, 0x56, 0x95, 0xcf, 0x19, 0x02, 0x73, 0x41, 
0xa2, 0x44, 0xbf, 0x66, 0x72, + 0x8d, 0xf6, 0xc0, 0x14, 0x1a, 0xe5, 0x15, 0xfe, 0xbf, 0xdb, 0xab, 0x56, 0xa5, 0x26, 0x4f, 0x56, + 0x09, 0xc1, 0x12, 0x2d, 0xd2, 0xa4, 0x5e, 0x60, 0xdb, 0xbc, 0x2d, 0x4d, 0x8a, 0x87, 0x35, 0xf6, + 0xd1, 0x08, 0xa0, 0x9c, 0x84, 0xda, 0xd0, 0x3c, 0x3e, 0x9b, 0x9e, 0x4e, 0x06, 0xb8, 0xf3, 0x1f, + 0xb2, 0xa0, 0x3e, 0x3c, 0x9c, 0x0e, 0x07, 0x1d, 0x43, 0xd4, 0xc7, 0xd3, 0xd1, 0xe8, 0x10, 0x5f, + 0x74, 0xd6, 0xc4, 0x66, 0x7a, 0x3a, 0xb9, 0x38, 0x1f, 0xbc, 0xec, 0xd4, 0xd0, 0x06, 0x58, 0x27, + 0xaf, 0xc7, 0x93, 0xb3, 0x21, 0x3e, 0x1c, 0x75, 0xcc, 0x23, 0x0c, 0x95, 0xef, 0xfe, 0xbb, 0x83, + 0x20, 0x64, 0x0b, 0x3e, 0x77, 0xfd, 0x98, 0xf6, 0xcb, 0x6e, 0x5f, 0x75, 0x67, 0x34, 0xbe, 0x24, + 0x51, 0x3f, 0x88, 0x9f, 0x85, 0xf1, 0xac, 0xec, 0xce, 0x54, 0xf7, 0x57, 0x00, 0x00, 0x00, 0xff, + 0xff, 0xd0, 0x84, 0x91, 0x73, 0x59, 0x06, 0x00, 0x00, } diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go index 11839ed65..bd4e34745 100644 --- a/vendor/github.com/prometheus/common/expfmt/encode.go +++ b/vendor/github.com/prometheus/common/expfmt/encode.go @@ -30,17 +30,38 @@ type Encoder interface { Encode(*dto.MetricFamily) error } -type encoder func(*dto.MetricFamily) error +// Closer is implemented by Encoders that need to be closed to finalize +// encoding. (For example, OpenMetrics needs a final `# EOF` line.) +// +// Note that all Encoder implementations returned from this package implement +// Closer, too, even if the Close call is a no-op. This happens in preparation +// for adding a Close method to the Encoder interface directly in a (mildly +// breaking) release in the future. +type Closer interface { + Close() error +} + +type encoderCloser struct { + encode func(*dto.MetricFamily) error + close func() error +} -func (e encoder) Encode(v *dto.MetricFamily) error { - return e(v) +func (ec encoderCloser) Encode(v *dto.MetricFamily) error { + return ec.encode(v) } -// Negotiate returns the Content-Type based on the given Accept header. -// If no appropriate accepted type is found, FmtText is returned. +func (ec encoderCloser) Close() error { + return ec.close() +} + +// Negotiate returns the Content-Type based on the given Accept header. If no +// appropriate accepted type is found, FmtText is returned (which is the +// Prometheus text format). This function will never negotiate FmtOpenMetrics, +// as the support is still experimental. To include the option to negotiate +// FmtOpenMetrics, use NegotiateOpenMetrics. func Negotiate(h http.Header) Format { for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) { - // Check for protocol buffer + ver := ac.Params["version"] if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { switch ac.Params["encoding"] { case "delimited": @@ -51,38 +72,91 @@ func Negotiate(h http.Header) Format { return FmtProtoCompact } } - // Check for text format. + if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { + return FmtText + } + } + return FmtText +} + +// NegotiateIncludingOpenMetrics works like Negotiate but includes +// FmtOpenMetrics as an option for the result. Note that this function is +// temporary and will disappear once FmtOpenMetrics is fully supported and as +// such may be negotiated by the normal Negotiate function. 
+func NegotiateIncludingOpenMetrics(h http.Header) Format { + for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) { ver := ac.Params["version"] + if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { + switch ac.Params["encoding"] { + case "delimited": + return FmtProtoDelim + case "text": + return FmtProtoText + case "compact-text": + return FmtProtoCompact + } + } if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { return FmtText } + if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion || ver == "") { + return FmtOpenMetrics + } } return FmtText } -// NewEncoder returns a new encoder based on content type negotiation. +// NewEncoder returns a new encoder based on content type negotiation. All +// Encoder implementations returned by NewEncoder also implement Closer, and +// callers should always call the Close method. It is currently only required +// for FmtOpenMetrics, but a future (breaking) release will add the Close method +// to the Encoder interface directly. The current version of the Encoder +// interface is kept for backwards compatibility. func NewEncoder(w io.Writer, format Format) Encoder { switch format { case FmtProtoDelim: - return encoder(func(v *dto.MetricFamily) error { - _, err := pbutil.WriteDelimited(w, v) - return err - }) + return encoderCloser{ + encode: func(v *dto.MetricFamily) error { + _, err := pbutil.WriteDelimited(w, v) + return err + }, + close: func() error { return nil }, + } case FmtProtoCompact: - return encoder(func(v *dto.MetricFamily) error { - _, err := fmt.Fprintln(w, v.String()) - return err - }) + return encoderCloser{ + encode: func(v *dto.MetricFamily) error { + _, err := fmt.Fprintln(w, v.String()) + return err + }, + close: func() error { return nil }, + } case FmtProtoText: - return encoder(func(v *dto.MetricFamily) error { - _, err := fmt.Fprintln(w, proto.MarshalTextString(v)) - return err - }) + return encoderCloser{ + encode: func(v *dto.MetricFamily) error { + _, err := fmt.Fprintln(w, proto.MarshalTextString(v)) + return err + }, + close: func() error { return nil }, + } case FmtText: - return encoder(func(v *dto.MetricFamily) error { - _, err := MetricFamilyToText(w, v) - return err - }) + return encoderCloser{ + encode: func(v *dto.MetricFamily) error { + _, err := MetricFamilyToText(w, v) + return err + }, + close: func() error { return nil }, + } + case FmtOpenMetrics: + return encoderCloser{ + encode: func(v *dto.MetricFamily) error { + _, err := MetricFamilyToOpenMetrics(w, v) + return err + }, + close: func() error { + _, err := FinalizeOpenMetrics(w) + return err + }, + } } - panic("expfmt.NewEncoder: unknown format") + panic(fmt.Errorf("expfmt.NewEncoder: unknown format %q", format)) } diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go index c71bcb981..0f176fa64 100644 --- a/vendor/github.com/prometheus/common/expfmt/expfmt.go +++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go @@ -19,10 +19,12 @@ type Format string // Constants to assemble the Content-Type values for the different wire protocols. 
const ( - TextVersion = "0.0.4" - ProtoType = `application/vnd.google.protobuf` - ProtoProtocol = `io.prometheus.client.MetricFamily` - ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" + TextVersion = "0.0.4" + ProtoType = `application/vnd.google.protobuf` + ProtoProtocol = `io.prometheus.client.MetricFamily` + ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" + OpenMetricsType = `application/openmetrics-text` + OpenMetricsVersion = "0.0.1" // The Content-Type values for the different wire protocols. FmtUnknown Format = `` @@ -30,6 +32,7 @@ const ( FmtProtoDelim Format = ProtoFmt + ` encoding=delimited` FmtProtoText Format = ProtoFmt + ` encoding=text` FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text` + FmtOpenMetrics Format = OpenMetricsType + `; version=` + OpenMetricsVersion + `; charset=utf-8` ) const ( diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go new file mode 100644 index 000000000..8a9313a3b --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go @@ -0,0 +1,527 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package expfmt + +import ( + "bufio" + "bytes" + "fmt" + "io" + "math" + "strconv" + "strings" + + "github.com/golang/protobuf/ptypes" + "github.com/prometheus/common/model" + + dto "github.com/prometheus/client_model/go" +) + +// MetricFamilyToOpenMetrics converts a MetricFamily proto message into the +// OpenMetrics text format and writes the resulting lines to 'out'. It returns +// the number of bytes written and any error encountered. The output will have +// the same order as the input, no further sorting is performed. Furthermore, +// this function assumes the input is already sanitized and does not perform any +// sanity checks. If the input contains duplicate metrics or invalid metric or +// label names, the conversion will result in invalid text format output. +// +// This function fulfills the type 'expfmt.encoder'. +// +// Note that OpenMetrics requires a final `# EOF` line. Since this function acts +// on individual metric families, it is the responsibility of the caller to +// append this line to 'out' once all metric families have been written. +// Conveniently, this can be done by calling FinalizeOpenMetrics. +// +// The output should be fully OpenMetrics compliant. However, there are a few +// missing features and peculiarities to avoid complications when switching from +// Prometheus to OpenMetrics or vice versa: +// +// - Counters are expected to have the `_total` suffix in their metric name. In +// the output, the suffix will be truncated from the `# TYPE` and `# HELP` +// line. A counter with a missing `_total` suffix is not an error. However, +// its type will be set to `unknown` in that case to avoid invalid OpenMetrics +// output. 
+// +// - No support for the following (optional) features: `# UNIT` line, `_created` +// line, info type, stateset type, gaugehistogram type. +// +// - The size of exemplar labels is not checked (i.e. it's possible to create +// exemplars that are larger than allowed by the OpenMetrics specification). +// +// - The value of Counters is not checked. (OpenMetrics doesn't allow counters +// with a `NaN` value.) +func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int, err error) { + name := in.GetName() + if name == "" { + return 0, fmt.Errorf("MetricFamily has no name: %s", in) + } + + // Try the interface upgrade. If it doesn't work, we'll use a + // bufio.Writer from the sync.Pool. + w, ok := out.(enhancedWriter) + if !ok { + b := bufPool.Get().(*bufio.Writer) + b.Reset(out) + w = b + defer func() { + bErr := b.Flush() + if err == nil { + err = bErr + } + bufPool.Put(b) + }() + } + + var ( + n int + metricType = in.GetType() + shortName = name + ) + if metricType == dto.MetricType_COUNTER && strings.HasSuffix(shortName, "_total") { + shortName = name[:len(name)-6] + } + + // Comments, first HELP, then TYPE. + if in.Help != nil { + n, err = w.WriteString("# HELP ") + written += n + if err != nil { + return + } + n, err = w.WriteString(shortName) + written += n + if err != nil { + return + } + err = w.WriteByte(' ') + written++ + if err != nil { + return + } + n, err = writeEscapedString(w, *in.Help, true) + written += n + if err != nil { + return + } + err = w.WriteByte('\n') + written++ + if err != nil { + return + } + } + n, err = w.WriteString("# TYPE ") + written += n + if err != nil { + return + } + n, err = w.WriteString(shortName) + written += n + if err != nil { + return + } + switch metricType { + case dto.MetricType_COUNTER: + if strings.HasSuffix(name, "_total") { + n, err = w.WriteString(" counter\n") + } else { + n, err = w.WriteString(" unknown\n") + } + case dto.MetricType_GAUGE: + n, err = w.WriteString(" gauge\n") + case dto.MetricType_SUMMARY: + n, err = w.WriteString(" summary\n") + case dto.MetricType_UNTYPED: + n, err = w.WriteString(" unknown\n") + case dto.MetricType_HISTOGRAM: + n, err = w.WriteString(" histogram\n") + default: + return written, fmt.Errorf("unknown metric type %s", metricType.String()) + } + written += n + if err != nil { + return + } + + // Finally the samples, one line for each. + for _, metric := range in.Metric { + switch metricType { + case dto.MetricType_COUNTER: + if metric.Counter == nil { + return written, fmt.Errorf( + "expected counter in metric %s %s", name, metric, + ) + } + // Note that we have ensured above that either the name + // ends on `_total` or that the rendered type is + // `unknown`. Therefore, no `_total` must be added here. 
+ n, err = writeOpenMetricsSample( + w, name, "", metric, "", 0, + metric.Counter.GetValue(), 0, false, + metric.Counter.Exemplar, + ) + case dto.MetricType_GAUGE: + if metric.Gauge == nil { + return written, fmt.Errorf( + "expected gauge in metric %s %s", name, metric, + ) + } + n, err = writeOpenMetricsSample( + w, name, "", metric, "", 0, + metric.Gauge.GetValue(), 0, false, + nil, + ) + case dto.MetricType_UNTYPED: + if metric.Untyped == nil { + return written, fmt.Errorf( + "expected untyped in metric %s %s", name, metric, + ) + } + n, err = writeOpenMetricsSample( + w, name, "", metric, "", 0, + metric.Untyped.GetValue(), 0, false, + nil, + ) + case dto.MetricType_SUMMARY: + if metric.Summary == nil { + return written, fmt.Errorf( + "expected summary in metric %s %s", name, metric, + ) + } + for _, q := range metric.Summary.Quantile { + n, err = writeOpenMetricsSample( + w, name, "", metric, + model.QuantileLabel, q.GetQuantile(), + q.GetValue(), 0, false, + nil, + ) + written += n + if err != nil { + return + } + } + n, err = writeOpenMetricsSample( + w, name, "_sum", metric, "", 0, + metric.Summary.GetSampleSum(), 0, false, + nil, + ) + written += n + if err != nil { + return + } + n, err = writeOpenMetricsSample( + w, name, "_count", metric, "", 0, + 0, metric.Summary.GetSampleCount(), true, + nil, + ) + case dto.MetricType_HISTOGRAM: + if metric.Histogram == nil { + return written, fmt.Errorf( + "expected histogram in metric %s %s", name, metric, + ) + } + infSeen := false + for _, b := range metric.Histogram.Bucket { + n, err = writeOpenMetricsSample( + w, name, "_bucket", metric, + model.BucketLabel, b.GetUpperBound(), + 0, b.GetCumulativeCount(), true, + b.Exemplar, + ) + written += n + if err != nil { + return + } + if math.IsInf(b.GetUpperBound(), +1) { + infSeen = true + } + } + if !infSeen { + n, err = writeOpenMetricsSample( + w, name, "_bucket", metric, + model.BucketLabel, math.Inf(+1), + 0, metric.Histogram.GetSampleCount(), true, + nil, + ) + written += n + if err != nil { + return + } + } + n, err = writeOpenMetricsSample( + w, name, "_sum", metric, "", 0, + metric.Histogram.GetSampleSum(), 0, false, + nil, + ) + written += n + if err != nil { + return + } + n, err = writeOpenMetricsSample( + w, name, "_count", metric, "", 0, + 0, metric.Histogram.GetSampleCount(), true, + nil, + ) + default: + return written, fmt.Errorf( + "unexpected type in metric %s %s", name, metric, + ) + } + written += n + if err != nil { + return + } + } + return +} + +// FinalizeOpenMetrics writes the final `# EOF\n` line required by OpenMetrics. +func FinalizeOpenMetrics(w io.Writer) (written int, err error) { + return w.Write([]byte("# EOF\n")) +} + +// writeOpenMetricsSample writes a single sample in OpenMetrics text format to +// w, given the metric name, the metric proto message itself, optionally an +// additional label name with a float64 value (use empty string as label name if +// not required), the value (optionally as float64 or uint64, determined by +// useIntValue), and optionally an exemplar (use nil if not required). The +// function returns the number of bytes written and any error encountered. 
+func writeOpenMetricsSample( + w enhancedWriter, + name, suffix string, + metric *dto.Metric, + additionalLabelName string, additionalLabelValue float64, + floatValue float64, intValue uint64, useIntValue bool, + exemplar *dto.Exemplar, +) (int, error) { + var written int + n, err := w.WriteString(name) + written += n + if err != nil { + return written, err + } + if suffix != "" { + n, err = w.WriteString(suffix) + written += n + if err != nil { + return written, err + } + } + n, err = writeOpenMetricsLabelPairs( + w, metric.Label, additionalLabelName, additionalLabelValue, + ) + written += n + if err != nil { + return written, err + } + err = w.WriteByte(' ') + written++ + if err != nil { + return written, err + } + if useIntValue { + n, err = writeUint(w, intValue) + } else { + n, err = writeOpenMetricsFloat(w, floatValue) + } + written += n + if err != nil { + return written, err + } + if metric.TimestampMs != nil { + err = w.WriteByte(' ') + written++ + if err != nil { + return written, err + } + // TODO(beorn7): Format this directly without converting to a float first. + n, err = writeOpenMetricsFloat(w, float64(*metric.TimestampMs)/1000) + written += n + if err != nil { + return written, err + } + } + if exemplar != nil { + n, err = writeExemplar(w, exemplar) + written += n + if err != nil { + return written, err + } + } + err = w.WriteByte('\n') + written++ + if err != nil { + return written, err + } + return written, nil +} + +// writeOpenMetricsLabelPairs works like writeOpenMetrics but formats the float +// in OpenMetrics style. +func writeOpenMetricsLabelPairs( + w enhancedWriter, + in []*dto.LabelPair, + additionalLabelName string, additionalLabelValue float64, +) (int, error) { + if len(in) == 0 && additionalLabelName == "" { + return 0, nil + } + var ( + written int + separator byte = '{' + ) + for _, lp := range in { + err := w.WriteByte(separator) + written++ + if err != nil { + return written, err + } + n, err := w.WriteString(lp.GetName()) + written += n + if err != nil { + return written, err + } + n, err = w.WriteString(`="`) + written += n + if err != nil { + return written, err + } + n, err = writeEscapedString(w, lp.GetValue(), true) + written += n + if err != nil { + return written, err + } + err = w.WriteByte('"') + written++ + if err != nil { + return written, err + } + separator = ',' + } + if additionalLabelName != "" { + err := w.WriteByte(separator) + written++ + if err != nil { + return written, err + } + n, err := w.WriteString(additionalLabelName) + written += n + if err != nil { + return written, err + } + n, err = w.WriteString(`="`) + written += n + if err != nil { + return written, err + } + n, err = writeOpenMetricsFloat(w, additionalLabelValue) + written += n + if err != nil { + return written, err + } + err = w.WriteByte('"') + written++ + if err != nil { + return written, err + } + } + err := w.WriteByte('}') + written++ + if err != nil { + return written, err + } + return written, nil +} + +// writeExemplar writes the provided exemplar in OpenMetrics format to w. The +// function returns the number of bytes written and any error encountered. 
+func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) { + written := 0 + n, err := w.WriteString(" # ") + written += n + if err != nil { + return written, err + } + n, err = writeOpenMetricsLabelPairs(w, e.Label, "", 0) + written += n + if err != nil { + return written, err + } + err = w.WriteByte(' ') + written++ + if err != nil { + return written, err + } + n, err = writeOpenMetricsFloat(w, e.GetValue()) + written += n + if err != nil { + return written, err + } + if e.Timestamp != nil { + err = w.WriteByte(' ') + written++ + if err != nil { + return written, err + } + ts, err := ptypes.Timestamp((*e).Timestamp) + if err != nil { + return written, err + } + // TODO(beorn7): Format this directly from components of ts to + // avoid overflow/underflow and precision issues of the float + // conversion. + n, err = writeOpenMetricsFloat(w, float64(ts.UnixNano())/1e9) + written += n + if err != nil { + return written, err + } + } + return written, nil +} + +// writeOpenMetricsFloat works like writeFloat but appends ".0" if the resulting +// number would otherwise contain neither a "." nor an "e". +func writeOpenMetricsFloat(w enhancedWriter, f float64) (int, error) { + switch { + case f == 1: + return w.WriteString("1.0") + case f == 0: + return w.WriteString("0.0") + case f == -1: + return w.WriteString("-1.0") + case math.IsNaN(f): + return w.WriteString("NaN") + case math.IsInf(f, +1): + return w.WriteString("+Inf") + case math.IsInf(f, -1): + return w.WriteString("-Inf") + default: + bp := numBufPool.Get().(*[]byte) + *bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64) + if !bytes.ContainsAny(*bp, "e.") { + *bp = append(*bp, '.', '0') + } + written, err := w.Write(*bp) + numBufPool.Put(bp) + return written, err + } +} + +// writeUint is like writeInt just for uint64. +func writeUint(w enhancedWriter, u uint64) (int, error) { + bp := numBufPool.Get().(*[]byte) + *bp = strconv.AppendUint((*bp)[:0], u, 10) + written, err := w.Write(*bp) + numBufPool.Put(bp) + return written, err +} diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go index 0327865ee..5ba503b06 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_create.go +++ b/vendor/github.com/prometheus/common/expfmt/text_create.go @@ -423,9 +423,8 @@ var ( func writeEscapedString(w enhancedWriter, v string, includeDoubleQuote bool) (int, error) { if includeDoubleQuote { return quotedEscaper.WriteString(w, v) - } else { - return escaper.WriteString(w, v) } + return escaper.WriteString(w, v) } // writeFloat is equivalent to fmt.Fprint with a float64 argument but hardcodes diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go index 7b0064fdb..490a0240c 100644 --- a/vendor/github.com/prometheus/common/model/time.go +++ b/vendor/github.com/prometheus/common/model/time.go @@ -186,6 +186,10 @@ var durationRE = regexp.MustCompile("^([0-9]+)(y|w|d|h|m|s|ms)$") // ParseDuration parses a string into a time.Duration, assuming that a year // always has 365d, a week always has 7d, and a day always has 24h. func ParseDuration(durationStr string) (Duration, error) { + // Allow 0 without a unit. 
+ if durationStr == "0" { + return 0, nil + } matches := durationRE.FindStringSubmatch(durationStr) if len(matches) != 3 { return 0, fmt.Errorf("not a valid duration string: %q", durationStr) diff --git a/vendor/github.com/prometheus/procfs/.golangci.yml b/vendor/github.com/prometheus/procfs/.golangci.yml index 7c4ce1fa8..0aa09edac 100644 --- a/vendor/github.com/prometheus/procfs/.golangci.yml +++ b/vendor/github.com/prometheus/procfs/.golangci.yml @@ -1,4 +1,4 @@ +--- linters: enable: - - staticcheck - - govet + - golint diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common index d7aea1b86..9320176ca 100644 --- a/vendor/github.com/prometheus/procfs/Makefile.common +++ b/vendor/github.com/prometheus/procfs/Makefile.common @@ -69,12 +69,21 @@ else GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH) endif -PROMU_VERSION ?= 0.4.0 +GOTEST := $(GO) test +GOTEST_DIR := +ifneq ($(CIRCLE_JOB),) +ifneq ($(shell which gotestsum),) + GOTEST_DIR := test-results + GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml -- +endif +endif + +PROMU_VERSION ?= 0.5.0 PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz GOLANGCI_LINT := GOLANGCI_LINT_OPTS ?= -GOLANGCI_LINT_VERSION ?= v1.16.0 +GOLANGCI_LINT_VERSION ?= v1.18.0 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64. # windows isn't included here because of the path separator being different. ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) @@ -86,7 +95,8 @@ endif PREFIX ?= $(shell pwd) BIN_DIR ?= $(shell pwd) DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD)) -DOCKERFILE_PATH ?= ./ +DOCKERFILE_PATH ?= ./Dockerfile +DOCKERBUILD_CONTEXT ?= ./ DOCKER_REPO ?= prom DOCKER_ARCHS ?= amd64 @@ -140,15 +150,29 @@ else $(GO) get $(GOOPTS) -t ./... 
endif +.PHONY: update-go-deps +update-go-deps: + @echo ">> updating Go dependencies" + @for m in $$($(GO) list -mod=readonly -m -f '{{ if and (not .Indirect) (not .Main)}}{{.Path}}{{end}}' all); do \ + $(GO) get $$m; \ + done + GO111MODULE=$(GO111MODULE) $(GO) mod tidy +ifneq (,$(wildcard vendor)) + GO111MODULE=$(GO111MODULE) $(GO) mod vendor +endif + .PHONY: common-test-short -common-test-short: +common-test-short: $(GOTEST_DIR) @echo ">> running short tests" - GO111MODULE=$(GO111MODULE) $(GO) test -short $(GOOPTS) $(pkgs) + GO111MODULE=$(GO111MODULE) $(GOTEST) -short $(GOOPTS) $(pkgs) .PHONY: common-test -common-test: +common-test: $(GOTEST_DIR) @echo ">> running all tests" - GO111MODULE=$(GO111MODULE) $(GO) test $(test-flags) $(GOOPTS) $(pkgs) + GO111MODULE=$(GO111MODULE) $(GOTEST) $(test-flags) $(GOOPTS) $(pkgs) + +$(GOTEST_DIR): + @mkdir -p $@ .PHONY: common-format common-format: @@ -200,7 +224,7 @@ endif .PHONY: common-build common-build: promu @echo ">> building binaries" - GO111MODULE=$(GO111MODULE) $(PROMU) build --prefix $(PREFIX) + GO111MODULE=$(GO111MODULE) $(PROMU) build --prefix $(PREFIX) $(PROMU_BINARIES) .PHONY: common-tarball common-tarball: promu @@ -211,9 +235,10 @@ common-tarball: promu common-docker: $(BUILD_DOCKER_ARCHS) $(BUILD_DOCKER_ARCHS): common-docker-%: docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" \ + -f $(DOCKERFILE_PATH) \ --build-arg ARCH="$*" \ --build-arg OS="linux" \ - $(DOCKERFILE_PATH) + $(DOCKERBUILD_CONTEXT) .PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS) common-docker-publish: $(PUBLISH_DOCKER_ARCHS) diff --git a/vendor/github.com/prometheus/procfs/cpuinfo.go b/vendor/github.com/prometheus/procfs/cpuinfo.go index 2e0221552..31d42f712 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo.go @@ -11,11 +11,15 @@ // See the License for the specific language governing permissions and // limitations under the License. +// +build linux + package procfs import ( "bufio" "bytes" + "errors" + "regexp" "strconv" "strings" @@ -52,6 +56,11 @@ type CPUInfo struct { PowerManagement string } +var ( + cpuinfoClockRegexp = regexp.MustCompile(`([\d.]+)`) + cpuinfoS390XProcessorRegexp = regexp.MustCompile(`^processor\s+(\d+):.*`) +) + // CPUInfo returns information about current system CPUs. 
// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt func (fs FS) CPUInfo() ([]CPUInfo, error) { @@ -62,14 +71,26 @@ func (fs FS) CPUInfo() ([]CPUInfo, error) { return parseCPUInfo(data) } -// parseCPUInfo parses data from /proc/cpuinfo -func parseCPUInfo(info []byte) ([]CPUInfo, error) { - cpuinfo := []CPUInfo{} - i := -1 +func parseCPUInfoX86(info []byte) ([]CPUInfo, error) { scanner := bufio.NewScanner(bytes.NewReader(info)) + + // find the first "processor" line + firstLine := firstNonEmptyLine(scanner) + if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") { + return nil, errors.New("invalid cpuinfo file: " + firstLine) + } + field := strings.SplitN(firstLine, ": ", 2) + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + firstcpu := CPUInfo{Processor: uint(v)} + cpuinfo := []CPUInfo{firstcpu} + i := 0 + for scanner.Scan() { line := scanner.Text() - if strings.TrimSpace(line) == "" { + if !strings.Contains(line, ":") { continue } field := strings.SplitN(line, ": ", 2) @@ -82,7 +103,7 @@ func parseCPUInfo(info []byte) ([]CPUInfo, error) { return nil, err } cpuinfo[i].Processor = uint(v) - case "vendor_id": + case "vendor", "vendor_id": cpuinfo[i].VendorID = field[1] case "cpu family": cpuinfo[i].CPUFamily = field[1] @@ -163,5 +184,237 @@ func parseCPUInfo(info []byte) ([]CPUInfo, error) { } } return cpuinfo, nil +} + +func parseCPUInfoARM(info []byte) ([]CPUInfo, error) { + scanner := bufio.NewScanner(bytes.NewReader(info)) + + firstLine := firstNonEmptyLine(scanner) + match, _ := regexp.MatchString("^[Pp]rocessor", firstLine) + if !match || !strings.Contains(firstLine, ":") { + return nil, errors.New("invalid cpuinfo file: " + firstLine) + } + field := strings.SplitN(firstLine, ": ", 2) + cpuinfo := []CPUInfo{} + featuresLine := "" + commonCPUInfo := CPUInfo{} + i := 0 + if strings.TrimSpace(field[0]) == "Processor" { + commonCPUInfo = CPUInfo{ModelName: field[1]} + i = -1 + } else { + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + firstcpu := CPUInfo{Processor: uint(v)} + cpuinfo = []CPUInfo{firstcpu} + } + + for scanner.Scan() { + line := scanner.Text() + if !strings.Contains(line, ":") { + continue + } + field := strings.SplitN(line, ": ", 2) + switch strings.TrimSpace(field[0]) { + case "processor": + cpuinfo = append(cpuinfo, commonCPUInfo) // start of the next processor + i++ + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + cpuinfo[i].Processor = uint(v) + case "BogoMIPS": + if i == -1 { + cpuinfo = append(cpuinfo, commonCPUInfo) // There is only one processor + i++ + cpuinfo[i].Processor = 0 + } + v, err := strconv.ParseFloat(field[1], 64) + if err != nil { + return nil, err + } + cpuinfo[i].BogoMips = v + case "Features": + featuresLine = line + case "model name": + cpuinfo[i].ModelName = field[1] + } + } + fields := strings.SplitN(featuresLine, ": ", 2) + for i := range cpuinfo { + cpuinfo[i].Flags = strings.Fields(fields[1]) + } + return cpuinfo, nil + +} + +func parseCPUInfoS390X(info []byte) ([]CPUInfo, error) { + scanner := bufio.NewScanner(bytes.NewReader(info)) + + firstLine := firstNonEmptyLine(scanner) + if !strings.HasPrefix(firstLine, "vendor_id") || !strings.Contains(firstLine, ":") { + return nil, errors.New("invalid cpuinfo file: " + firstLine) + } + field := strings.SplitN(firstLine, ": ", 2) + cpuinfo := []CPUInfo{} + commonCPUInfo := CPUInfo{VendorID: field[1]} + + for scanner.Scan() { + line := 
scanner.Text() + if !strings.Contains(line, ":") { + continue + } + field := strings.SplitN(line, ": ", 2) + switch strings.TrimSpace(field[0]) { + case "bogomips per cpu": + v, err := strconv.ParseFloat(field[1], 64) + if err != nil { + return nil, err + } + commonCPUInfo.BogoMips = v + case "features": + commonCPUInfo.Flags = strings.Fields(field[1]) + } + if strings.HasPrefix(line, "processor") { + match := cpuinfoS390XProcessorRegexp.FindStringSubmatch(line) + if len(match) < 2 { + return nil, errors.New("Invalid line found in cpuinfo: " + line) + } + cpu := commonCPUInfo + v, err := strconv.ParseUint(match[1], 0, 32) + if err != nil { + return nil, err + } + cpu.Processor = uint(v) + cpuinfo = append(cpuinfo, cpu) + } + if strings.HasPrefix(line, "cpu number") { + break + } + } + + i := 0 + for scanner.Scan() { + line := scanner.Text() + if !strings.Contains(line, ":") { + continue + } + field := strings.SplitN(line, ": ", 2) + switch strings.TrimSpace(field[0]) { + case "cpu number": + i++ + case "cpu MHz dynamic": + clock := cpuinfoClockRegexp.FindString(strings.TrimSpace(field[1])) + v, err := strconv.ParseFloat(clock, 64) + if err != nil { + return nil, err + } + cpuinfo[i].CPUMHz = v + } + } + + return cpuinfo, nil +} + +func parseCPUInfoMips(info []byte) ([]CPUInfo, error) { + scanner := bufio.NewScanner(bytes.NewReader(info)) + + // find the first "processor" line + firstLine := firstNonEmptyLine(scanner) + if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") { + return nil, errors.New("invalid cpuinfo file: " + firstLine) + } + field := strings.SplitN(firstLine, ": ", 2) + cpuinfo := []CPUInfo{} + systemType := field[1] + + i := 0 + + for scanner.Scan() { + line := scanner.Text() + if !strings.Contains(line, ":") { + continue + } + field := strings.SplitN(line, ": ", 2) + switch strings.TrimSpace(field[0]) { + case "processor": + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + i = int(v) + cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor + cpuinfo[i].Processor = uint(v) + cpuinfo[i].VendorID = systemType + case "cpu model": + cpuinfo[i].ModelName = field[1] + case "BogoMIPS": + v, err := strconv.ParseFloat(field[1], 64) + if err != nil { + return nil, err + } + cpuinfo[i].BogoMips = v + } + } + return cpuinfo, nil +} + +func parseCPUInfoPPC(info []byte) ([]CPUInfo, error) { + scanner := bufio.NewScanner(bytes.NewReader(info)) + + firstLine := firstNonEmptyLine(scanner) + if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") { + return nil, errors.New("invalid cpuinfo file: " + firstLine) + } + field := strings.SplitN(firstLine, ": ", 2) + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + firstcpu := CPUInfo{Processor: uint(v)} + cpuinfo := []CPUInfo{firstcpu} + i := 0 + + for scanner.Scan() { + line := scanner.Text() + if !strings.Contains(line, ":") { + continue + } + field := strings.SplitN(line, ": ", 2) + switch strings.TrimSpace(field[0]) { + case "processor": + cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor + i++ + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + cpuinfo[i].Processor = uint(v) + case "cpu": + cpuinfo[i].VendorID = field[1] + case "clock": + clock := cpuinfoClockRegexp.FindString(strings.TrimSpace(field[1])) + v, err := strconv.ParseFloat(clock, 64) + if err != nil { + return nil, err + } + cpuinfo[i].CPUMHz = v + } + } + return cpuinfo, nil +} 
+// firstNonEmptyLine advances the scanner to the first non-empty line +// and returns the contents of that line +func firstNonEmptyLine(scanner *bufio.Scanner) string { + for scanner.Scan() { + line := scanner.Text() + if strings.TrimSpace(line) != "" { + return line + } + } + return "" } diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_arm.go b/vendor/github.com/prometheus/procfs/cpuinfo_arm.go new file mode 100644 index 000000000..835550770 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/cpuinfo_arm.go @@ -0,0 +1,18 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build linux + +package procfs + +var parseCPUInfo = parseCPUInfoARM diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_arm64.go b/vendor/github.com/prometheus/procfs/cpuinfo_arm64.go new file mode 100644 index 000000000..4f5d172a3 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/cpuinfo_arm64.go @@ -0,0 +1,19 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build linux +// +build arm64 + +package procfs + +var parseCPUInfo = parseCPUInfoARM diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_default.go b/vendor/github.com/prometheus/procfs/cpuinfo_default.go new file mode 100644 index 000000000..d5bedf97f --- /dev/null +++ b/vendor/github.com/prometheus/procfs/cpuinfo_default.go @@ -0,0 +1,19 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build linux +// +build 386 amd64 + +package procfs + +var parseCPUInfo = parseCPUInfoX86 diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_mips.go b/vendor/github.com/prometheus/procfs/cpuinfo_mips.go new file mode 100644 index 000000000..22d93f8ef --- /dev/null +++ b/vendor/github.com/prometheus/procfs/cpuinfo_mips.go @@ -0,0 +1,18 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build linux + +package procfs + +var parseCPUInfo = parseCPUInfoMips diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_mips64.go b/vendor/github.com/prometheus/procfs/cpuinfo_mips64.go new file mode 100644 index 000000000..22d93f8ef --- /dev/null +++ b/vendor/github.com/prometheus/procfs/cpuinfo_mips64.go @@ -0,0 +1,18 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build linux + +package procfs + +var parseCPUInfo = parseCPUInfoMips diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_mips64le.go b/vendor/github.com/prometheus/procfs/cpuinfo_mips64le.go new file mode 100644 index 000000000..22d93f8ef --- /dev/null +++ b/vendor/github.com/prometheus/procfs/cpuinfo_mips64le.go @@ -0,0 +1,18 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build linux + +package procfs + +var parseCPUInfo = parseCPUInfoMips diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_mipsle.go b/vendor/github.com/prometheus/procfs/cpuinfo_mipsle.go new file mode 100644 index 000000000..22d93f8ef --- /dev/null +++ b/vendor/github.com/prometheus/procfs/cpuinfo_mipsle.go @@ -0,0 +1,18 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build linux + +package procfs + +var parseCPUInfo = parseCPUInfoMips diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_ppc64.go b/vendor/github.com/prometheus/procfs/cpuinfo_ppc64.go new file mode 100644 index 000000000..64aee9c63 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/cpuinfo_ppc64.go @@ -0,0 +1,18 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build linux + +package procfs + +var parseCPUInfo = parseCPUInfoPPC diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_ppc64le.go b/vendor/github.com/prometheus/procfs/cpuinfo_ppc64le.go new file mode 100644 index 000000000..64aee9c63 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/cpuinfo_ppc64le.go @@ -0,0 +1,18 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build linux + +package procfs + +var parseCPUInfo = parseCPUInfoPPC diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go b/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go new file mode 100644 index 000000000..26814eeba --- /dev/null +++ b/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go @@ -0,0 +1,18 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build linux + +package procfs + +var parseCPUInfo = parseCPUInfoS390X diff --git a/vendor/github.com/prometheus/procfs/crypto.go b/vendor/github.com/prometheus/procfs/crypto.go index 19d4041b2..a95893375 100644 --- a/vendor/github.com/prometheus/procfs/crypto.go +++ b/vendor/github.com/prometheus/procfs/crypto.go @@ -14,10 +14,10 @@ package procfs import ( + "bufio" "bytes" "fmt" - "io/ioutil" - "strconv" + "io" "strings" "github.com/prometheus/procfs/internal/util" @@ -52,80 +52,102 @@ type Crypto struct { // structs containing the relevant info. More information available here: // https://kernel.readthedocs.io/en/sphinx-samples/crypto-API.html func (fs FS) Crypto() ([]Crypto, error) { - data, err := ioutil.ReadFile(fs.proc.Path("crypto")) + path := fs.proc.Path("crypto") + b, err := util.ReadFileNoStat(path) if err != nil { - return nil, fmt.Errorf("error parsing crypto %s: %s", fs.proc.Path("crypto"), err) + return nil, fmt.Errorf("error reading crypto %s: %s", path, err) } - crypto, err := parseCrypto(data) + + crypto, err := parseCrypto(bytes.NewReader(b)) if err != nil { - return nil, fmt.Errorf("error parsing crypto %s: %s", fs.proc.Path("crypto"), err) + return nil, fmt.Errorf("error parsing crypto %s: %s", path, err) } + return crypto, nil } -func parseCrypto(cryptoData []byte) ([]Crypto, error) { - crypto := []Crypto{} - - cryptoBlocks := bytes.Split(cryptoData, []byte("\n\n")) - - for _, block := range cryptoBlocks { - var newCryptoElem Crypto - - lines := strings.Split(string(block), "\n") - for _, line := range lines { - if strings.TrimSpace(line) == "" || line[0] == ' ' { - continue - } - fields := strings.Split(line, ":") - key := strings.TrimSpace(fields[0]) - value := strings.TrimSpace(fields[1]) - vp := util.NewValueParser(value) - - switch strings.TrimSpace(key) { - case "async": - b, err := strconv.ParseBool(value) - if err == nil { - newCryptoElem.Async = b - } - case "blocksize": - newCryptoElem.Blocksize = vp.PUInt64() - case "chunksize": - newCryptoElem.Chunksize = vp.PUInt64() - case "digestsize": - newCryptoElem.Digestsize = vp.PUInt64() - case "driver": - newCryptoElem.Driver = value - case "geniv": - newCryptoElem.Geniv = value - case "internal": - newCryptoElem.Internal = value - case "ivsize": - newCryptoElem.Ivsize = vp.PUInt64() - case "maxauthsize": - newCryptoElem.Maxauthsize = vp.PUInt64() - case "max keysize": - newCryptoElem.MaxKeysize = vp.PUInt64() - case "min keysize": - newCryptoElem.MinKeysize = vp.PUInt64() - case "module": - newCryptoElem.Module = value - case "name": - newCryptoElem.Name = value - case "priority": - newCryptoElem.Priority = vp.PInt64() - case "refcnt": - newCryptoElem.Refcnt = vp.PInt64() - case "seedsize": - newCryptoElem.Seedsize = vp.PUInt64() - case "selftest": - newCryptoElem.Selftest = value - case "type": - newCryptoElem.Type = value - case "walksize": - newCryptoElem.Walksize = vp.PUInt64() - } +// parseCrypto parses a /proc/crypto stream into Crypto elements. +func parseCrypto(r io.Reader) ([]Crypto, error) { + var out []Crypto + + s := bufio.NewScanner(r) + for s.Scan() { + text := s.Text() + switch { + case strings.HasPrefix(text, "name"): + // Each crypto element begins with its name. + out = append(out, Crypto{}) + case text == "": + continue + } + + kv := strings.Split(text, ":") + if len(kv) != 2 { + return nil, fmt.Errorf("malformed crypto line: %q", text) + } + + k := strings.TrimSpace(kv[0]) + v := strings.TrimSpace(kv[1]) + + // Parse the key/value pair into the currently focused element. 
+ c := &out[len(out)-1] + if err := c.parseKV(k, v); err != nil { + return nil, err } - crypto = append(crypto, newCryptoElem) } - return crypto, nil + + if err := s.Err(); err != nil { + return nil, err + } + + return out, nil +} + +// parseKV parses a key/value pair into the appropriate field of c. +func (c *Crypto) parseKV(k, v string) error { + vp := util.NewValueParser(v) + + switch k { + case "async": + // Interpret literal yes as true. + c.Async = v == "yes" + case "blocksize": + c.Blocksize = vp.PUInt64() + case "chunksize": + c.Chunksize = vp.PUInt64() + case "digestsize": + c.Digestsize = vp.PUInt64() + case "driver": + c.Driver = v + case "geniv": + c.Geniv = v + case "internal": + c.Internal = v + case "ivsize": + c.Ivsize = vp.PUInt64() + case "maxauthsize": + c.Maxauthsize = vp.PUInt64() + case "max keysize": + c.MaxKeysize = vp.PUInt64() + case "min keysize": + c.MinKeysize = vp.PUInt64() + case "module": + c.Module = v + case "name": + c.Name = v + case "priority": + c.Priority = vp.PInt64() + case "refcnt": + c.Refcnt = vp.PInt64() + case "seedsize": + c.Seedsize = vp.PUInt64() + case "selftest": + c.Selftest = v + case "type": + c.Type = v + case "walksize": + c.Walksize = vp.PUInt64() + } + + return vp.Err() } diff --git a/vendor/github.com/prometheus/procfs/fixtures.ttar b/vendor/github.com/prometheus/procfs/fixtures.ttar index c50a18ace..868c8573d 100644 --- a/vendor/github.com/prometheus/procfs/fixtures.ttar +++ b/vendor/github.com/prometheus/procfs/fixtures.ttar @@ -173,6 +173,283 @@ Lines: 1 411605849 93680043 79 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/smaps +Lines: 252 +00400000-00cb1000 r-xp 00000000 fd:01 952273 /bin/alertmanager +Size: 8900 kB +KernelPageSize: 4 kB +MMUPageSize: 4 kB +Rss: 2952 kB +Pss: 2952 kB +Shared_Clean: 0 kB +Shared_Dirty: 0 kB +Private_Clean: 2952 kB +Private_Dirty: 0 kB +Referenced: 2864 kB +Anonymous: 0 kB +LazyFree: 0 kB +AnonHugePages: 0 kB +ShmemPmdMapped: 0 kB +Shared_Hugetlb: 0 kB +Private_Hugetlb: 0 kB +Swap: 0 kB +SwapPss: 0 kB +Locked: 0 kB +VmFlags: rd ex mr mw me dw sd +00cb1000-016b0000 r--p 008b1000 fd:01 952273 /bin/alertmanager +Size: 10236 kB +KernelPageSize: 4 kB +MMUPageSize: 4 kB +Rss: 6152 kB +Pss: 6152 kB +Shared_Clean: 0 kB +Shared_Dirty: 0 kB +Private_Clean: 6152 kB +Private_Dirty: 0 kB +Referenced: 5308 kB +Anonymous: 0 kB +LazyFree: 0 kB +AnonHugePages: 0 kB +ShmemPmdMapped: 0 kB +Shared_Hugetlb: 0 kB +Private_Hugetlb: 0 kB +Swap: 0 kB +SwapPss: 0 kB +Locked: 0 kB +VmFlags: rd mr mw me dw sd +016b0000-0171a000 rw-p 012b0000 fd:01 952273 /bin/alertmanager +Size: 424 kB +KernelPageSize: 4 kB +MMUPageSize: 4 kB +Rss: 176 kB +Pss: 176 kB +Shared_Clean: 0 kB +Shared_Dirty: 0 kB +Private_Clean: 84 kB +Private_Dirty: 92 kB +Referenced: 176 kB +Anonymous: 92 kB +LazyFree: 0 kB +AnonHugePages: 0 kB +ShmemPmdMapped: 0 kB +Shared_Hugetlb: 0 kB +Private_Hugetlb: 0 kB +Swap: 12 kB +SwapPss: 12 kB +Locked: 0 kB +VmFlags: rd wr mr mw me dw ac sd +0171a000-0173f000 rw-p 00000000 00:00 0 +Size: 148 kB +KernelPageSize: 4 kB +MMUPageSize: 4 kB +Rss: 76 kB +Pss: 76 kB +Shared_Clean: 0 kB +Shared_Dirty: 0 kB +Private_Clean: 0 kB +Private_Dirty: 76 kB +Referenced: 76 kB +Anonymous: 76 kB +LazyFree: 0 kB +AnonHugePages: 0 kB +ShmemPmdMapped: 0 kB +Shared_Hugetlb: 0 kB +Private_Hugetlb: 0 kB +Swap: 0 kB +SwapPss: 0 kB +Locked: 0 kB +VmFlags: rd wr mr mw me ac sd +c000000000-c000400000 rw-p 00000000 00:00 0 +Size: 4096 kB +KernelPageSize: 4 kB +MMUPageSize: 4 kB 
+Rss: 2564 kB +Pss: 2564 kB +Shared_Clean: 0 kB +Shared_Dirty: 0 kB +Private_Clean: 20 kB +Private_Dirty: 2544 kB +Referenced: 2544 kB +Anonymous: 2564 kB +LazyFree: 0 kB +AnonHugePages: 0 kB +ShmemPmdMapped: 0 kB +Shared_Hugetlb: 0 kB +Private_Hugetlb: 0 kB +Swap: 1100 kB +SwapPss: 1100 kB +Locked: 0 kB +VmFlags: rd wr mr mw me ac sd +c000400000-c001600000 rw-p 00000000 00:00 0 +Size: 18432 kB +KernelPageSize: 4 kB +MMUPageSize: 4 kB +Rss: 16024 kB +Pss: 16024 kB +Shared_Clean: 0 kB +Shared_Dirty: 0 kB +Private_Clean: 5864 kB +Private_Dirty: 10160 kB +Referenced: 11944 kB +Anonymous: 16024 kB +LazyFree: 5848 kB +AnonHugePages: 0 kB +ShmemPmdMapped: 0 kB +Shared_Hugetlb: 0 kB +Private_Hugetlb: 0 kB +Swap: 440 kB +SwapPss: 440 kB +Locked: 0 kB +VmFlags: rd wr mr mw me ac sd nh +c001600000-c004000000 rw-p 00000000 00:00 0 +Size: 43008 kB +KernelPageSize: 4 kB +MMUPageSize: 4 kB +Rss: 0 kB +Pss: 0 kB +Shared_Clean: 0 kB +Shared_Dirty: 0 kB +Private_Clean: 0 kB +Private_Dirty: 0 kB +Referenced: 0 kB +Anonymous: 0 kB +LazyFree: 0 kB +AnonHugePages: 0 kB +ShmemPmdMapped: 0 kB +Shared_Hugetlb: 0 kB +Private_Hugetlb: 0 kB +Swap: 0 kB +SwapPss: 0 kB +Locked: 0 kB +VmFlags: rd wr mr mw me ac sd +7f0ab95ca000-7f0abbb7b000 rw-p 00000000 00:00 0 +Size: 38596 kB +KernelPageSize: 4 kB +MMUPageSize: 4 kB +Rss: 1992 kB +Pss: 1992 kB +Shared_Clean: 0 kB +Shared_Dirty: 0 kB +Private_Clean: 476 kB +Private_Dirty: 1516 kB +Referenced: 1828 kB +Anonymous: 1992 kB +LazyFree: 0 kB +AnonHugePages: 0 kB +ShmemPmdMapped: 0 kB +Shared_Hugetlb: 0 kB +Private_Hugetlb: 0 kB +Swap: 384 kB +SwapPss: 384 kB +Locked: 0 kB +VmFlags: rd wr mr mw me ac sd +7ffc07ecf000-7ffc07ef0000 rw-p 00000000 00:00 0 [stack] +Size: 132 kB +KernelPageSize: 4 kB +MMUPageSize: 4 kB +Rss: 8 kB +Pss: 8 kB +Shared_Clean: 0 kB +Shared_Dirty: 0 kB +Private_Clean: 0 kB +Private_Dirty: 8 kB +Referenced: 8 kB +Anonymous: 8 kB +LazyFree: 0 kB +AnonHugePages: 0 kB +ShmemPmdMapped: 0 kB +Shared_Hugetlb: 0 kB +Private_Hugetlb: 0 kB +Swap: 4 kB +SwapPss: 4 kB +Locked: 0 kB +VmFlags: rd wr mr mw me gd ac +7ffc07f9e000-7ffc07fa1000 r--p 00000000 00:00 0 [vvar] +Size: 12 kB +KernelPageSize: 4 kB +MMUPageSize: 4 kB +Rss: 0 kB +Pss: 0 kB +Shared_Clean: 0 kB +Shared_Dirty: 0 kB +Private_Clean: 0 kB +Private_Dirty: 0 kB +Referenced: 0 kB +Anonymous: 0 kB +LazyFree: 0 kB +AnonHugePages: 0 kB +ShmemPmdMapped: 0 kB +Shared_Hugetlb: 0 kB +Private_Hugetlb: 0 kB +Swap: 0 kB +SwapPss: 0 kB +Locked: 0 kB +VmFlags: rd mr pf io de dd sd +7ffc07fa1000-7ffc07fa3000 r-xp 00000000 00:00 0 [vdso] +Size: 8 kB +KernelPageSize: 4 kB +MMUPageSize: 4 kB +Rss: 4 kB +Pss: 0 kB +Shared_Clean: 4 kB +Shared_Dirty: 0 kB +Private_Clean: 0 kB +Private_Dirty: 0 kB +Referenced: 4 kB +Anonymous: 0 kB +LazyFree: 0 kB +AnonHugePages: 0 kB +ShmemPmdMapped: 0 kB +Shared_Hugetlb: 0 kB +Private_Hugetlb: 0 kB +Swap: 0 kB +SwapPss: 0 kB +Locked: 0 kB +VmFlags: rd ex mr mw me de sd +ffffffffff600000-ffffffffff601000 r-xp 00000000 00:00 0 [vsyscall] +Size: 4 kB +KernelPageSize: 4 kB +MMUPageSize: 4 kB +Rss: 0 kB +Pss: 0 kB +Shared_Clean: 0 kB +Shared_Dirty: 0 kB +Private_Clean: 0 kB +Private_Dirty: 0 kB +Referenced: 0 kB +Anonymous: 0 kB +LazyFree: 0 kB +AnonHugePages: 0 kB +ShmemPmdMapped: 0 kB +Shared_Hugetlb: 0 kB +Private_Hugetlb: 0 kB +Swap: 0 kB +SwapPss: 0 kB +Locked: 0 kB +VmFlags: rd ex +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/smaps_rollup +Lines: 17 +00400000-ffffffffff601000 ---p 00000000 00:00 0 [rollup] +Rss: 29948 
kB +Pss: 29944 kB +Shared_Clean: 4 kB +Shared_Dirty: 0 kB +Private_Clean: 15548 kB +Private_Dirty: 14396 kB +Referenced: 24752 kB +Anonymous: 20756 kB +LazyFree: 5848 kB +AnonHugePages: 0 kB +ShmemPmdMapped: 0 kB +Shared_Hugetlb: 0 kB +Private_Hugetlb: 0 kB +Swap: 1940 kB +SwapPss: 1940 kB +Locked: 0 kB +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/proc/26231/stat Lines: 1 26231 (vim) R 5392 7446 5392 34835 7446 4218880 32533 309516 26 82 1677 44 158 99 20 0 1 0 82375 56274944 1981 18446744073709551615 4194304 6294284 140736914091744 140736914087944 139965136429984 0 0 12288 1870679807 0 0 0 17 0 0 0 31 0 0 8391624 8481048 16420864 140736914093252 140736914093279 140736914093279 140736914096107 0 @@ -189,7 +466,7 @@ Ngid: 0 Pid: 26231 PPid: 1 TracerPid: 0 -Uid: 0 0 0 0 +Uid: 1000 1000 1000 0 Gid: 0 0 0 0 FDSize: 128 Groups: @@ -235,6 +512,11 @@ voluntary_ctxt_switches: 4742839 nonvoluntary_ctxt_switches: 1727500 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/wchan +Lines: 1 +poll_schedule_timeoutEOF +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: fixtures/proc/26232 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -289,6 +571,19 @@ Max realtime priority 0 0 Max realtime timeout unlimited unlimited us Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26232/maps +Lines: 9 +55680ae1e000-55680ae20000 r--p 00000000 fd:01 47316994 /bin/cat +55680ae29000-55680ae2a000 rwxs 0000a000 fd:01 47316994 /bin/cat +55680bed6000-55680bef7000 rw-p 00000000 00:00 0 [heap] +7fdf964fc000-7fdf973f2000 r--p 00000000 fd:01 17432624 /usr/lib/locale/locale-archive +7fdf973f2000-7fdf97417000 r--p 00000000 fd:01 60571062 /lib/x86_64-linux-gnu/libc-2.29.so +7ffe9215c000-7ffe9217f000 rw-p 00000000 00:00 0 [stack] +7ffe921da000-7ffe921dd000 r--p 00000000 00:00 0 [vvar] +7ffe921dd000-7ffe921de000 r-xp 00000000 00:00 0 [vdso] +ffffffffff600000-ffffffffff601000 --xp 00000000 00:00 0 [vsyscall] +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/proc/26232/root SymlinkTo: /does/not/exist # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -297,6 +592,11 @@ Lines: 1 33 (ata_sff) S 2 0 0 0 -1 69238880 0 0 0 0 0 0 0 0 0 -20 1 0 5 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 18446744073709551615 0 0 17 1 0 0 0 0 0 0 0 0 0 0 0 0 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26232/wchan +Lines: 1 +0EOF +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: fixtures/proc/26233 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -317,6 +617,17 @@ Lines: 8 || || Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/proc/26234 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26234/maps +Lines: 4 +08048000-08089000 r-xp 00000000 03:01 104219 /bin/tcsh +08089000-0808c000 rw-p 00041000 03:01 104219 /bin/tcsh +0808c000-08146000 rwxp 00000000 00:00 0 +40000000-40015000 r-xp 00000000 03:01 61874 /lib/ld-2.3.2.so +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
Directory: fixtures/proc/584 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -554,7 +865,7 @@ power management: Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/proc/crypto -Lines: 971 +Lines: 972 name : ccm(aes) driver : ccm_base(ctr(aes-aesni),cbcmac(aes-aesni)) module : ccm @@ -588,6 +899,7 @@ refcnt : 1 selftest : passed internal : no type : kpp +async : yes name : ecb(arc4) driver : ecb(arc4)-generic @@ -1529,7 +1841,7 @@ max keysize : 32 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/proc/diskstats -Lines: 49 +Lines: 52 1 0 ram0 0 0 0 0 0 0 0 0 0 0 0 1 1 ram1 0 0 0 0 0 0 0 0 0 0 0 1 2 ram2 0 0 0 0 0 0 0 0 0 0 0 @@ -1579,11 +1891,45 @@ Lines: 49 8 0 sdb 326552 841 9657779 84 41822 2895 1972905 5007 0 60730 67070 68851 0 1925173784 11130 8 1 sdb1 231 3 34466 4 24 23 106 0 0 64 64 0 0 0 0 8 2 sdb2 326310 838 9622281 67 40726 2872 1972799 4924 0 58250 64567 68851 0 1925173784 11130 + 8 0 sdc 14202 71 579164 21861 2995 1589 180500 40875 0 11628 55200 0 0 0 0 127 182 + 8 1 sdc1 1027 0 13795 5021 2 0 4096 3 0 690 4579 0 0 0 0 0 0 + 8 2 sdc2 13126 71 561749 16802 2830 1589 176404 40620 0 10931 50449 0 0 0 0 0 0 Mode: 664 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: fixtures/proc/fs Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/proc/fs/fscache +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/fs/fscache/stats +Lines: 24 +FS-Cache statistics +Cookies: idx=3 dat=67877 spc=0 +Objects: alc=67473 nal=0 avl=67473 ded=388 +ChkAux : non=12 ok=33 upd=44 obs=55 +Pages : mrk=547164 unc=364577 +Acquire: n=67880 nul=98 noc=25 ok=67780 nbf=39 oom=26 +Lookups: n=67473 neg=67470 pos=58 crt=67473 tmo=85 +Invals : n=14 run=13 +Updates: n=7 nul=3 run=8 +Relinqs: n=394 nul=1 wcr=2 rtr=3 +AttrChg: n=6 ok=5 nbf=4 oom=3 run=2 +Allocs : n=20 ok=19 wt=18 nbf=17 int=16 +Allocs : ops=15 owt=14 abt=13 +Retrvls: n=151959 ok=82823 wt=23467 nod=69136 nbf=15 int=69 oom=43 +Retrvls: ops=151959 owt=42747 abt=44 +Stores : n=225565 ok=225565 agn=12 nbf=13 oom=14 +Stores : ops=69156 run=294721 pgs=225565 rxd=225565 olm=43 +VmScan : nos=364512 gon=2 bsy=43 can=12 wt=66 +Ops : pend=42753 run=221129 enq=628798 can=11 rej=88 +Ops : ini=377538 dfr=27 rel=377538 gc=37 +CacheOp: alo=1 luo=2 luc=3 gro=4 +CacheOp: inv=5 upo=6 dro=7 pto=8 atc=9 syn=10 +CacheOp: rap=11 ras=12 alp=13 als=14 wrp=15 ucp=16 dsp=17 +CacheEv: nsp=18 stl=19 rtr=20 cul=21EOF +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: fixtures/proc/fs/xfs Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -1614,6 +1960,11 @@ xpc 399724544 92823103 86219234 debug 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/loadavg +Lines: 1 +0.02 0.04 0.05 1/497 11947 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/proc/mdstat Lines: 56 Personalities : [linear] [multipath] [raid0] [raid1] [raid6] [raid5] [raid4] [raid10] @@ -1821,8 +2172,35 @@ FRAG6: inuse 0 memory 0 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/proc/net/softnet_stat -Lines: 1 +Lines: 2 00015c73 00020e76 F0000769 00000000 00000000 
00000000 00000000 00000000 00000000 00000000 00000000 +01663fb2 00000000 000109a4 00000000 00000000 00000000 00000000 00000000 00000000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/net/softnet_stat.broken +Lines: 1 +00015c73 00020e76 F0000769 00000000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/net/udp +Lines: 4 + sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode + 0: 0A000005:0016 00000000:0000 0A 00000000:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0 + 1: 00000000:0016 00000000:0000 0A 00000001:00000000 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0 + 2: 00000000:0016 00000000:0000 0A 00000001:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/net/udp6 +Lines: 3 + sl local_address remote_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode ref pointer drops + 1315: 00000000000000000000000000000000:14EB 00000000000000000000000000000000:0000 07 00000000:00000000 00:00000000 00000000 981 0 21040 2 0000000013726323 0 + 6073: 000080FE00000000FFADE15609667CFE:C781 00000000000000000000000000000000:0000 07 00000000:00000000 00:00000000 00000000 1000 0 11337031 2 00000000b9256fdd 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/net/udp_broken +Lines: 2 + sl local_address rem_address st + 1: 00000000:0016 00000000:0000 0A Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/proc/net/unix @@ -1930,6 +2308,12 @@ procs_blocked 1 softirq 5057579 250191 1481983 1647 211099 186066 0 1783454 622196 12499 508444 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/swaps +Lines: 2 +Filename Type Size Used Priority +/dev/dm-2 partition 131068 176 -2 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: fixtures/proc/symlinktargets Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -1962,6 +2346,32 @@ Mode: 644 Directory: fixtures/proc/sys Mode: 775 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/proc/sys/kernel +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/proc/sys/kernel/random +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/kernel/random/entropy_avail +Lines: 1 +3943 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/kernel/random/poolsize +Lines: 1 +4096 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/kernel/random/urandom_min_reseed_secs +Lines: 1 +60 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/kernel/random/write_wakeup_threshold +Lines: 1 +3072 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: fixtures/proc/sys/vm Mode: 775 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -2463,6 +2873,237 @@ Mode: 664 Directory: 
fixtures/sys/block/sda Mode: 775 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/block/sda/queue +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/add_random +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/chunk_sectors +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/dax +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/discard_granularity +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/discard_max_bytes +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/discard_max_hw_bytes +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/discard_zeroes_data +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/fua +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/hw_sector_size +Lines: 1 +512 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/io_poll +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/io_poll_delay +Lines: 1 +-1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/io_timeout +Lines: 1 +30000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/block/sda/queue/iosched +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/iosched/back_seek_max +Lines: 1 +16384 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/iosched/back_seek_penalty +Lines: 1 +2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/iosched/fifo_expire_async +Lines: 1 +250 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/iosched/fifo_expire_sync +Lines: 1 +125 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/iosched/low_latency +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/iosched/max_budget +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/iosched/slice_idle +Lines: 1 +8 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/iosched/slice_idle_us +Lines: 1 +8000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/sys/block/sda/queue/iosched/strict_guarantees +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/iosched/timeout_sync +Lines: 1 +125 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/iostats +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/logical_block_size +Lines: 1 +512 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/max_discard_segments +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/max_hw_sectors_kb +Lines: 1 +32767 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/max_integrity_segments +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/max_sectors_kb +Lines: 1 +1280 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/max_segment_size +Lines: 1 +65536 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/max_segments +Lines: 1 +168 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/minimum_io_size +Lines: 1 +512 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/nomerges +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/nr_requests +Lines: 1 +64 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/nr_zones +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/optimal_io_size +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/physical_block_size +Lines: 1 +512 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/read_ahead_kb +Lines: 1 +128 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/rotational +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/rq_affinity +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/scheduler +Lines: 1 +mq-deadline kyber [bfq] none +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/wbt_lat_usec +Lines: 1 +75000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/write_cache +Lines: 1 +write back +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/write_same_max_bytes +Lines: 1 +0 +Mode: 444 
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/write_zeroes_max_bytes +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/zoned +Lines: 1 +none +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/sys/block/sda/stat Lines: 1 9652963 396792 759304206 412943 8422549 6731723 286915323 13947418 0 5658367 19174573 1 2 3 12 @@ -2471,6 +3112,140 @@ Mode: 664 Directory: fixtures/sys/class Mode: 775 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/fc_host +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/fc_host/host0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/dev_loss_tmo +Lines: 1 +30 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/fabric_name +Lines: 1 +0x0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/node_name +Lines: 1 +0x2000e0071bce95f2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/port_id +Lines: 1 +0x000002 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/port_name +Lines: 1 +0x1000e0071bce95f2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/port_state +Lines: 1 +Online +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/port_type +Lines: 1 +Point-To-Point (direct nport connection) +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/speed +Lines: 1 +16 Gbit +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/fc_host/host0/statistics +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/statistics/dumped_frames +Lines: 1 +0xffffffffffffffff +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/statistics/error_frames +Lines: 1 +0x0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/statistics/fcp_packet_aborts +Lines: 1 +0x13 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/statistics/invalid_crc_count +Lines: 1 +0x2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/statistics/invalid_tx_word_count +Lines: 1 +0x8 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/statistics/link_failure_count +Lines: 1 +0x9 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/sys/class/fc_host/host0/statistics/loss_of_signal_count +Lines: 1 +0x11 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/statistics/loss_of_sync_count +Lines: 1 +0x10 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/statistics/nos_count +Lines: 1 +0x12 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/statistics/rx_frames +Lines: 1 +0x3 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/statistics/rx_words +Lines: 1 +0x4 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/statistics/seconds_since_last_reset +Lines: 1 +0x7 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/statistics/tx_frames +Lines: 1 +0x5 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/statistics/tx_words +Lines: 1 +0x6 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/supported_classes +Lines: 1 +Class 3 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/supported_speeds +Lines: 1 +4 Gbit, 8 Gbit, 16 Gbit +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/symbolic_name +Lines: 1 +Emulex SN1100E2P FV12.4.270.3 DV12.4.0.0. HN:gotest. 
OS:Linux +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: fixtures/sys/class/infiniband Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -2501,6 +3276,11 @@ Mode: 755 Directory: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/VL15_dropped +Lines: 1 +0 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/excessive_buffer_overrun_errors Lines: 1 0 @@ -2602,6 +3382,11 @@ Mode: 755 Directory: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/VL15_dropped +Lines: 1 +0 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/excessive_buffer_overrun_errors Lines: 1 0 @@ -3046,7 +3831,7 @@ Mode: 664 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/sys/class/thermal/thermal_zone1/temp Lines: 1 -44000 +-44000 Mode: 664 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/sys/class/thermal/thermal_zone1/type @@ -4224,6 +5009,17 @@ Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/writeback_rate_debug +Lines: 7 +rate: 1.1M/sec +dirty: 20.4G +target: 20.4G +proportional: 427.5k +integral: 790.0k +change: 321.5k/sec +next io: 17ms +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/btree_cache_size Lines: 1 0 diff --git a/vendor/github.com/prometheus/procfs/fscache.go b/vendor/github.com/prometheus/procfs/fscache.go new file mode 100644 index 000000000..8783cf3cc --- /dev/null +++ b/vendor/github.com/prometheus/procfs/fscache.go @@ -0,0 +1,422 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// Fscacheinfo represents fscache statistics. 
+type Fscacheinfo struct { + // Number of index cookies allocated + IndexCookiesAllocated uint64 + // data storage cookies allocated + DataStorageCookiesAllocated uint64 + // Number of special cookies allocated + SpecialCookiesAllocated uint64 + // Number of objects allocated + ObjectsAllocated uint64 + // Number of object allocation failures + ObjectAllocationsFailure uint64 + // Number of objects that reached the available state + ObjectsAvailable uint64 + // Number of objects that reached the dead state + ObjectsDead uint64 + // Number of objects that didn't have a coherency check + ObjectsWithoutCoherencyCheck uint64 + // Number of objects that passed a coherency check + ObjectsWithCoherencyCheck uint64 + // Number of objects that needed a coherency data update + ObjectsNeedCoherencyCheckUpdate uint64 + // Number of objects that were declared obsolete + ObjectsDeclaredObsolete uint64 + // Number of pages marked as being cached + PagesMarkedAsBeingCached uint64 + // Number of uncache page requests seen + UncachePagesRequestSeen uint64 + // Number of acquire cookie requests seen + AcquireCookiesRequestSeen uint64 + // Number of acq reqs given a NULL parent + AcquireRequestsWithNullParent uint64 + // Number of acq reqs rejected due to no cache available + AcquireRequestsRejectedNoCacheAvailable uint64 + // Number of acq reqs succeeded + AcquireRequestsSucceeded uint64 + // Number of acq reqs rejected due to error + AcquireRequestsRejectedDueToError uint64 + // Number of acq reqs failed on ENOMEM + AcquireRequestsFailedDueToEnomem uint64 + // Number of lookup calls made on cache backends + LookupsNumber uint64 + // Number of negative lookups made + LookupsNegative uint64 + // Number of positive lookups made + LookupsPositive uint64 + // Number of objects created by lookup + ObjectsCreatedByLookup uint64 + // Number of lookups timed out and requeued + LookupsTimedOutAndRequed uint64 + InvalidationsNumber uint64 + InvalidationsRunning uint64 + // Number of update cookie requests seen + UpdateCookieRequestSeen uint64 + // Number of upd reqs given a NULL parent + UpdateRequestsWithNullParent uint64 + // Number of upd reqs granted CPU time + UpdateRequestsRunning uint64 + // Number of relinquish cookie requests seen + RelinquishCookiesRequestSeen uint64 + // Number of rlq reqs given a NULL parent + RelinquishCookiesWithNullParent uint64 + // Number of rlq reqs waited on completion of creation + RelinquishRequestsWaitingCompleteCreation uint64 + // Relinqs rtr + RelinquishRetries uint64 + // Number of attribute changed requests seen + AttributeChangedRequestsSeen uint64 + // Number of attr changed requests queued + AttributeChangedRequestsQueued uint64 + // Number of attr changed rejected -ENOBUFS + AttributeChangedRejectDueToEnobufs uint64 + // Number of attr changed failed -ENOMEM + AttributeChangedFailedDueToEnomem uint64 + // Number of attr changed ops given CPU time + AttributeChangedOps uint64 + // Number of allocation requests seen + AllocationRequestsSeen uint64 + // Number of successful alloc reqs + AllocationOkRequests uint64 + // Number of alloc reqs that waited on lookup completion + AllocationWaitingOnLookup uint64 + // Number of alloc reqs rejected -ENOBUFS + AllocationsRejectedDueToEnobufs uint64 + // Number of alloc reqs aborted -ERESTARTSYS + AllocationsAbortedDueToErestartsys uint64 + // Number of alloc reqs submitted + AllocationOperationsSubmitted uint64 + // Number of alloc reqs waited for CPU time + AllocationsWaitedForCPU uint64 + // Number of alloc reqs aborted due to 
object death + AllocationsAbortedDueToObjectDeath uint64 + // Number of retrieval (read) requests seen + RetrievalsReadRequests uint64 + // Number of successful retr reqs + RetrievalsOk uint64 + // Number of retr reqs that waited on lookup completion + RetrievalsWaitingLookupCompletion uint64 + // Number of retr reqs returned -ENODATA + RetrievalsReturnedEnodata uint64 + // Number of retr reqs rejected -ENOBUFS + RetrievalsRejectedDueToEnobufs uint64 + // Number of retr reqs aborted -ERESTARTSYS + RetrievalsAbortedDueToErestartsys uint64 + // Number of retr reqs failed -ENOMEM + RetrievalsFailedDueToEnomem uint64 + // Number of retr reqs submitted + RetrievalsRequests uint64 + // Number of retr reqs waited for CPU time + RetrievalsWaitingCPU uint64 + // Number of retr reqs aborted due to object death + RetrievalsAbortedDueToObjectDeath uint64 + // Number of storage (write) requests seen + StoreWriteRequests uint64 + // Number of successful store reqs + StoreSuccessfulRequests uint64 + // Number of store reqs on a page already pending storage + StoreRequestsOnPendingStorage uint64 + // Number of store reqs rejected -ENOBUFS + StoreRequestsRejectedDueToEnobufs uint64 + // Number of store reqs failed -ENOMEM + StoreRequestsFailedDueToEnomem uint64 + // Number of store reqs submitted + StoreRequestsSubmitted uint64 + // Number of store reqs granted CPU time + StoreRequestsRunning uint64 + // Number of pages given store req processing time + StorePagesWithRequestsProcessing uint64 + // Number of store reqs deleted from tracking tree + StoreRequestsDeleted uint64 + // Number of store reqs over store limit + StoreRequestsOverStoreLimit uint64 + // Number of release reqs against pages with no pending store + ReleaseRequestsAgainstPagesWithNoPendingStorage uint64 + // Number of release reqs against pages stored by time lock granted + ReleaseRequestsAgainstPagesStoredByTimeLockGranted uint64 + // Number of release reqs ignored due to in-progress store + ReleaseRequestsIgnoredDueToInProgressStore uint64 + // Number of page stores cancelled due to release req + PageStoresCancelledByReleaseRequests uint64 + VmscanWaiting uint64 + // Number of times async ops added to pending queues + OpsPending uint64 + // Number of times async ops given CPU time + OpsRunning uint64 + // Number of times async ops queued for processing + OpsEnqueued uint64 + // Number of async ops cancelled + OpsCancelled uint64 + // Number of async ops rejected due to object lookup/create failure + OpsRejected uint64 + // Number of async ops initialised + OpsInitialised uint64 + // Number of async ops queued for deferred release + OpsDeferred uint64 + // Number of async ops released (should equal ini=N when idle) + OpsReleased uint64 + // Number of deferred-release async ops garbage collected + OpsGarbageCollected uint64 + // Number of in-progress alloc_object() cache ops + CacheopAllocationsinProgress uint64 + // Number of in-progress lookup_object() cache ops + CacheopLookupObjectInProgress uint64 + // Number of in-progress lookup_complete() cache ops + CacheopLookupCompleteInPorgress uint64 + // Number of in-progress grab_object() cache ops + CacheopGrabObjectInProgress uint64 + CacheopInvalidations uint64 + // Number of in-progress update_object() cache ops + CacheopUpdateObjectInProgress uint64 + // Number of in-progress drop_object() cache ops + CacheopDropObjectInProgress uint64 + // Number of in-progress put_object() cache ops + CacheopPutObjectInProgress uint64 + // Number of in-progress attr_changed() cache ops + 
CacheopAttributeChangeInProgress uint64 + // Number of in-progress sync_cache() cache ops + CacheopSyncCacheInProgress uint64 + // Number of in-progress read_or_alloc_page() cache ops + CacheopReadOrAllocPageInProgress uint64 + // Number of in-progress read_or_alloc_pages() cache ops + CacheopReadOrAllocPagesInProgress uint64 + // Number of in-progress allocate_page() cache ops + CacheopAllocatePageInProgress uint64 + // Number of in-progress allocate_pages() cache ops + CacheopAllocatePagesInProgress uint64 + // Number of in-progress write_page() cache ops + CacheopWritePagesInProgress uint64 + // Number of in-progress uncache_page() cache ops + CacheopUncachePagesInProgress uint64 + // Number of in-progress dissociate_pages() cache ops + CacheopDissociatePagesInProgress uint64 + // Number of object lookups/creations rejected due to lack of space + CacheevLookupsAndCreationsRejectedLackSpace uint64 + // Number of stale objects deleted + CacheevStaleObjectsDeleted uint64 + // Number of objects retired when relinquished + CacheevRetiredWhenReliquished uint64 + // Number of objects culled + CacheevObjectsCulled uint64 +} + +// Fscacheinfo returns information about current fscache statistics. +// See https://www.kernel.org/doc/Documentation/filesystems/caching/fscache.txt +func (fs FS) Fscacheinfo() (Fscacheinfo, error) { + b, err := util.ReadFileNoStat(fs.proc.Path("fs/fscache/stats")) + if err != nil { + return Fscacheinfo{}, err + } + + m, err := parseFscacheinfo(bytes.NewReader(b)) + if err != nil { + return Fscacheinfo{}, fmt.Errorf("failed to parse Fscacheinfo: %v", err) + } + + return *m, nil +} + +func setFSCacheFields(fields []string, setFields ...*uint64) error { + var err error + if len(fields) < len(setFields) { + return fmt.Errorf("Insufficient number of fields, expected %v, got %v", len(setFields), len(fields)) + } + + for i := range setFields { + *setFields[i], err = strconv.ParseUint(strings.Split(fields[i], "=")[1], 0, 64) + if err != nil { + return err + } + } + return nil +} + +func parseFscacheinfo(r io.Reader) (*Fscacheinfo, error) { + var m Fscacheinfo + s := bufio.NewScanner(r) + for s.Scan() { + fields := strings.Fields(s.Text()) + if len(fields) < 2 { + return nil, fmt.Errorf("malformed Fscacheinfo line: %q", s.Text()) + } + + switch fields[0] { + case "Cookies:": + err := setFSCacheFields(fields[1:], &m.IndexCookiesAllocated, &m.DataStorageCookiesAllocated, + &m.SpecialCookiesAllocated) + if err != nil { + return &m, err + } + case "Objects:": + err := setFSCacheFields(fields[1:], &m.ObjectsAllocated, &m.ObjectAllocationsFailure, + &m.ObjectsAvailable, &m.ObjectsDead) + if err != nil { + return &m, err + } + case "ChkAux": + err := setFSCacheFields(fields[2:], &m.ObjectsWithoutCoherencyCheck, &m.ObjectsWithCoherencyCheck, + &m.ObjectsNeedCoherencyCheckUpdate, &m.ObjectsDeclaredObsolete) + if err != nil { + return &m, err + } + case "Pages": + err := setFSCacheFields(fields[2:], &m.PagesMarkedAsBeingCached, &m.UncachePagesRequestSeen) + if err != nil { + return &m, err + } + case "Acquire:": + err := setFSCacheFields(fields[1:], &m.AcquireCookiesRequestSeen, &m.AcquireRequestsWithNullParent, + &m.AcquireRequestsRejectedNoCacheAvailable, &m.AcquireRequestsSucceeded, &m.AcquireRequestsRejectedDueToError, + &m.AcquireRequestsFailedDueToEnomem) + if err != nil { + return &m, err + } + case "Lookups:": + err := setFSCacheFields(fields[1:], &m.LookupsNumber, &m.LookupsNegative, &m.LookupsPositive, + &m.ObjectsCreatedByLookup, &m.LookupsTimedOutAndRequed) + if err != nil { + 
return &m, err + } + case "Invals": + err := setFSCacheFields(fields[2:], &m.InvalidationsNumber, &m.InvalidationsRunning) + if err != nil { + return &m, err + } + case "Updates:": + err := setFSCacheFields(fields[1:], &m.UpdateCookieRequestSeen, &m.UpdateRequestsWithNullParent, + &m.UpdateRequestsRunning) + if err != nil { + return &m, err + } + case "Relinqs:": + err := setFSCacheFields(fields[1:], &m.RelinquishCookiesRequestSeen, &m.RelinquishCookiesWithNullParent, + &m.RelinquishRequestsWaitingCompleteCreation, &m.RelinquishRetries) + if err != nil { + return &m, err + } + case "AttrChg:": + err := setFSCacheFields(fields[1:], &m.AttributeChangedRequestsSeen, &m.AttributeChangedRequestsQueued, + &m.AttributeChangedRejectDueToEnobufs, &m.AttributeChangedFailedDueToEnomem, &m.AttributeChangedOps) + if err != nil { + return &m, err + } + case "Allocs": + if strings.Split(fields[2], "=")[0] == "n" { + err := setFSCacheFields(fields[2:], &m.AllocationRequestsSeen, &m.AllocationOkRequests, + &m.AllocationWaitingOnLookup, &m.AllocationsRejectedDueToEnobufs, &m.AllocationsAbortedDueToErestartsys) + if err != nil { + return &m, err + } + } else { + err := setFSCacheFields(fields[2:], &m.AllocationOperationsSubmitted, &m.AllocationsWaitedForCPU, + &m.AllocationsAbortedDueToObjectDeath) + if err != nil { + return &m, err + } + } + case "Retrvls:": + if strings.Split(fields[1], "=")[0] == "n" { + err := setFSCacheFields(fields[1:], &m.RetrievalsReadRequests, &m.RetrievalsOk, &m.RetrievalsWaitingLookupCompletion, + &m.RetrievalsReturnedEnodata, &m.RetrievalsRejectedDueToEnobufs, &m.RetrievalsAbortedDueToErestartsys, + &m.RetrievalsFailedDueToEnomem) + if err != nil { + return &m, err + } + } else { + err := setFSCacheFields(fields[1:], &m.RetrievalsRequests, &m.RetrievalsWaitingCPU, &m.RetrievalsAbortedDueToObjectDeath) + if err != nil { + return &m, err + } + } + case "Stores": + if strings.Split(fields[2], "=")[0] == "n" { + err := setFSCacheFields(fields[2:], &m.StoreWriteRequests, &m.StoreSuccessfulRequests, + &m.StoreRequestsOnPendingStorage, &m.StoreRequestsRejectedDueToEnobufs, &m.StoreRequestsFailedDueToEnomem) + if err != nil { + return &m, err + } + } else { + err := setFSCacheFields(fields[2:], &m.StoreRequestsSubmitted, &m.StoreRequestsRunning, + &m.StorePagesWithRequestsProcessing, &m.StoreRequestsDeleted, &m.StoreRequestsOverStoreLimit) + if err != nil { + return &m, err + } + } + case "VmScan": + err := setFSCacheFields(fields[2:], &m.ReleaseRequestsAgainstPagesWithNoPendingStorage, + &m.ReleaseRequestsAgainstPagesStoredByTimeLockGranted, &m.ReleaseRequestsIgnoredDueToInProgressStore, + &m.PageStoresCancelledByReleaseRequests, &m.VmscanWaiting) + if err != nil { + return &m, err + } + case "Ops": + if strings.Split(fields[2], "=")[0] == "pend" { + err := setFSCacheFields(fields[2:], &m.OpsPending, &m.OpsRunning, &m.OpsEnqueued, &m.OpsCancelled, &m.OpsRejected) + if err != nil { + return &m, err + } + } else { + err := setFSCacheFields(fields[2:], &m.OpsInitialised, &m.OpsDeferred, &m.OpsReleased, &m.OpsGarbageCollected) + if err != nil { + return &m, err + } + } + case "CacheOp:": + if strings.Split(fields[1], "=")[0] == "alo" { + err := setFSCacheFields(fields[1:], &m.CacheopAllocationsinProgress, &m.CacheopLookupObjectInProgress, + &m.CacheopLookupCompleteInPorgress, &m.CacheopGrabObjectInProgress) + if err != nil { + return &m, err + } + } else if strings.Split(fields[1], "=")[0] == "inv" { + err := setFSCacheFields(fields[1:], &m.CacheopInvalidations, 
&m.CacheopUpdateObjectInProgress, + &m.CacheopDropObjectInProgress, &m.CacheopPutObjectInProgress, &m.CacheopAttributeChangeInProgress, + &m.CacheopSyncCacheInProgress) + if err != nil { + return &m, err + } + } else { + err := setFSCacheFields(fields[1:], &m.CacheopReadOrAllocPageInProgress, &m.CacheopReadOrAllocPagesInProgress, + &m.CacheopAllocatePageInProgress, &m.CacheopAllocatePagesInProgress, &m.CacheopWritePagesInProgress, + &m.CacheopUncachePagesInProgress, &m.CacheopDissociatePagesInProgress) + if err != nil { + return &m, err + } + } + case "CacheEv:": + err := setFSCacheFields(fields[1:], &m.CacheevLookupsAndCreationsRejectedLackSpace, &m.CacheevStaleObjectsDeleted, + &m.CacheevRetiredWhenReliquished, &m.CacheevObjectsCulled) + if err != nil { + return &m, err + } + } + } + + return &m, nil +} diff --git a/vendor/github.com/prometheus/procfs/go.mod b/vendor/github.com/prometheus/procfs/go.mod index 0e04e5d1f..ded48253c 100644 --- a/vendor/github.com/prometheus/procfs/go.mod +++ b/vendor/github.com/prometheus/procfs/go.mod @@ -5,4 +5,5 @@ go 1.12 require ( github.com/google/go-cmp v0.3.1 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e + golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e ) diff --git a/vendor/github.com/prometheus/procfs/go.sum b/vendor/github.com/prometheus/procfs/go.sum index 33b824b01..54b5f3303 100644 --- a/vendor/github.com/prometheus/procfs/go.sum +++ b/vendor/github.com/prometheus/procfs/go.sum @@ -2,3 +2,5 @@ github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e h1:LwyF2AFISC9nVbS6MgzsaQNSUsRXI49GS+YQ5KX/QH0= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/prometheus/procfs/internal/util/parse.go b/vendor/github.com/prometheus/procfs/internal/util/parse.go index 755591d9a..22cb07a6b 100644 --- a/vendor/github.com/prometheus/procfs/internal/util/parse.go +++ b/vendor/github.com/prometheus/procfs/internal/util/parse.go @@ -73,6 +73,15 @@ func ReadUintFromFile(path string) (uint64, error) { return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64) } +// ReadIntFromFile reads a file and attempts to parse a int64 from it. +func ReadIntFromFile(path string) (int64, error) { + data, err := ioutil.ReadFile(path) + if err != nil { + return 0, err + } + return strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64) +} + // ParseBool parses a string into a boolean pointer. func ParseBool(b string) *bool { var truth bool diff --git a/vendor/github.com/prometheus/procfs/kernel_random.go b/vendor/github.com/prometheus/procfs/kernel_random.go new file mode 100644 index 000000000..beefdf02e --- /dev/null +++ b/vendor/github.com/prometheus/procfs/kernel_random.go @@ -0,0 +1,62 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
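Usage sketch for the new Fscacheinfo reader vendored above — illustrative only, not part of the vendored sources; it assumes the package's existing NewFS/DefaultMountPoint constructor and a kernel that exposes /proc/fs/fscache/stats:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// NewFS and DefaultMountPoint are pre-existing procfs helpers.
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}

	// Fscacheinfo reads and parses /proc/fs/fscache/stats.
	info, err := fs.Fscacheinfo()
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println("index cookies allocated:", info.IndexCookiesAllocated)
	fmt.Println("objects allocated:", info.ObjectsAllocated)
	fmt.Println("read retrievals:", info.RetrievalsReadRequests)
}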
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !windows + +package procfs + +import ( + "os" + + "github.com/prometheus/procfs/internal/util" +) + +// KernelRandom contains information about to the kernel's random number generator. +type KernelRandom struct { + // EntropyAvaliable gives the available entropy, in bits. + EntropyAvaliable *uint64 + // PoolSize gives the size of the entropy pool, in bytes. + PoolSize *uint64 + // URandomMinReseedSeconds is the number of seconds after which the DRNG will be reseeded. + URandomMinReseedSeconds *uint64 + // WriteWakeupThreshold the number of bits of entropy below which we wake up processes + // that do a select(2) or poll(2) for write access to /dev/random. + WriteWakeupThreshold *uint64 + // ReadWakeupThreshold is the number of bits of entropy required for waking up processes that sleep + // waiting for entropy from /dev/random. + ReadWakeupThreshold *uint64 +} + +// KernelRandom returns values from /proc/sys/kernel/random. +func (fs FS) KernelRandom() (KernelRandom, error) { + random := KernelRandom{} + + for file, p := range map[string]**uint64{ + "entropy_avail": &random.EntropyAvaliable, + "poolsize": &random.PoolSize, + "urandom_min_reseed_secs": &random.URandomMinReseedSeconds, + "write_wakeup_threshold": &random.WriteWakeupThreshold, + "read_wakeup_threshold": &random.ReadWakeupThreshold, + } { + val, err := util.ReadUintFromFile(fs.proc.Path("sys", "kernel", "random", file)) + if os.IsNotExist(err) { + continue + } + if err != nil { + return random, err + } + *p = &val + } + + return random, nil +} diff --git a/vendor/github.com/prometheus/procfs/loadavg.go b/vendor/github.com/prometheus/procfs/loadavg.go new file mode 100644 index 000000000..00bbe1441 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/loadavg.go @@ -0,0 +1,62 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "fmt" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// LoadAvg represents an entry in /proc/loadavg +type LoadAvg struct { + Load1 float64 + Load5 float64 + Load15 float64 +} + +// LoadAvg returns loadavg from /proc. +func (fs FS) LoadAvg() (*LoadAvg, error) { + path := fs.proc.Path("loadavg") + + data, err := util.ReadFileNoStat(path) + if err != nil { + return nil, err + } + return parseLoad(data) +} + +// Parse /proc loadavg and return 1m, 5m and 15m. 
+func parseLoad(loadavgBytes []byte) (*LoadAvg, error) { + loads := make([]float64, 3) + parts := strings.Fields(string(loadavgBytes)) + if len(parts) < 3 { + return nil, fmt.Errorf("malformed loadavg line: too few fields in loadavg string: %s", string(loadavgBytes)) + } + + var err error + for i, load := range parts[0:3] { + loads[i], err = strconv.ParseFloat(load, 64) + if err != nil { + return nil, fmt.Errorf("could not parse load '%s': %s", load, err) + } + } + return &LoadAvg{ + Load1: loads[0], + Load5: loads[1], + Load15: loads[2], + }, nil +} diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go index 2af3ada18..3e9362a94 100644 --- a/vendor/github.com/prometheus/procfs/mdstat.go +++ b/vendor/github.com/prometheus/procfs/mdstat.go @@ -52,7 +52,7 @@ type MDStat struct { func (fs FS) MDStat() ([]MDStat, error) { data, err := ioutil.ReadFile(fs.proc.Path("mdstat")) if err != nil { - return nil, fmt.Errorf("error parsing mdstat %s: %s", fs.proc.Path("mdstat"), err) + return nil, err } mdstat, err := parseMDStat(data) if err != nil { diff --git a/vendor/github.com/prometheus/procfs/mountinfo.go b/vendor/github.com/prometheus/procfs/mountinfo.go index bb01bb5a2..59f4d5055 100644 --- a/vendor/github.com/prometheus/procfs/mountinfo.go +++ b/vendor/github.com/prometheus/procfs/mountinfo.go @@ -29,10 +29,10 @@ import ( // is described in the following man page. // http://man7.org/linux/man-pages/man5/proc.5.html type MountInfo struct { - // Unique Id for the mount - MountId int - // The Id of the parent mount - ParentId int + // Unique ID for the mount + MountID int + // The ID of the parent mount + ParentID int // The value of `st_dev` for the files on this FS MajorMinorVer string // The pathname of the directory in the FS that forms @@ -77,7 +77,7 @@ func parseMountInfoString(mountString string) (*MountInfo, error) { mountInfo := strings.Split(mountString, " ") mountInfoLength := len(mountInfo) - if mountInfoLength < 11 { + if mountInfoLength < 10 { return nil, fmt.Errorf("couldn't find enough fields in mount string: %s", mountString) } @@ -96,11 +96,11 @@ func parseMountInfoString(mountString string) (*MountInfo, error) { SuperOptions: mountOptionsParser(mountInfo[mountInfoLength-1]), } - mount.MountId, err = strconv.Atoi(mountInfo[0]) + mount.MountID, err = strconv.Atoi(mountInfo[0]) if err != nil { return nil, fmt.Errorf("failed to parse mount ID") } - mount.ParentId, err = strconv.Atoi(mountInfo[1]) + mount.ParentID, err = strconv.Atoi(mountInfo[1]) if err != nil { return nil, fmt.Errorf("failed to parse parent ID") } @@ -144,7 +144,7 @@ func mountOptionsParseOptionalFields(o []string) (map[string]string, error) { return optionalFields, nil } -// Parses the mount options, superblock options. +// mountOptionsParser parses the mount options, superblock options. func mountOptionsParser(mountOptions string) map[string]string { opts := make(map[string]string) options := strings.Split(mountOptions, ",") @@ -161,7 +161,7 @@ func mountOptionsParser(mountOptions string) map[string]string { return opts } -// Retrieves mountinfo information from `/proc/self/mountinfo`. +// GetMounts retrieves mountinfo information from `/proc/self/mountinfo`. func GetMounts() ([]*MountInfo, error) { data, err := util.ReadFileNoStat("/proc/self/mountinfo") if err != nil { @@ -170,7 +170,7 @@ func GetMounts() ([]*MountInfo, error) { return parseMountInfo(data) } -// Retrieves mountinfo information from a processes' `/proc//mountinfo`. 
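The new LoadAvg and KernelRandom readers above can be exercised together; a minimal sketch, illustrative only and assuming the default /proc mount:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}

	// /proc/loadavg: 1, 5 and 15 minute load averages.
	load, err := fs.LoadAvg()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("load: %.2f %.2f %.2f\n", load.Load1, load.Load5, load.Load15)

	// /proc/sys/kernel/random: fields are pointers because individual
	// files may be missing on a given kernel and are then left nil.
	random, err := fs.KernelRandom()
	if err != nil {
		log.Fatal(err)
	}
	if random.EntropyAvaliable != nil {
		fmt.Println("entropy available:", *random.EntropyAvaliable)
	}
}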
+// GetProcMounts retrieves mountinfo information from a processes' `/proc//mountinfo`. func GetProcMounts(pid int) ([]*MountInfo, error) { data, err := util.ReadFileNoStat(fmt.Sprintf("/proc/%d/mountinfo", pid)) if err != nil { diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go index 35b2ef351..861ced9da 100644 --- a/vendor/github.com/prometheus/procfs/mountstats.go +++ b/vendor/github.com/prometheus/procfs/mountstats.go @@ -186,6 +186,8 @@ type NFSOperationStats struct { CumulativeTotalResponseMilliseconds uint64 // Duration from when a request was enqueued to when it was completely handled. CumulativeTotalRequestMilliseconds uint64 + // The count of operations that complete with tk_status < 0. These statuses usually indicate error conditions. + Errors uint64 } // A NFSTransportStats contains statistics for the NFS mount RPC requests and @@ -494,8 +496,8 @@ func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) { // line is reached. func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) { const ( - // Number of expected fields in each per-operation statistics set - numFields = 9 + // Minimum number of expected fields in each per-operation statistics set + minFields = 9 ) var ops []NFSOperationStats @@ -508,12 +510,12 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) { break } - if len(ss) != numFields { + if len(ss) < minFields { return nil, fmt.Errorf("invalid NFS per-operations stats: %v", ss) } // Skip string operation name for integers - ns := make([]uint64, 0, numFields-1) + ns := make([]uint64, 0, minFields-1) for _, st := range ss[1:] { n, err := strconv.ParseUint(st, 10, 64) if err != nil { @@ -523,7 +525,7 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) { ns = append(ns, n) } - ops = append(ops, NFSOperationStats{ + opStats := NFSOperationStats{ Operation: strings.TrimSuffix(ss[0], ":"), Requests: ns[0], Transmissions: ns[1], @@ -533,7 +535,13 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) { CumulativeQueueMilliseconds: ns[5], CumulativeTotalResponseMilliseconds: ns[6], CumulativeTotalRequestMilliseconds: ns[7], - }) + } + + if len(ns) > 8 { + opStats.Errors = ns[8] + } + + ops = append(ops, opStats) } return ops, s.Err() diff --git a/vendor/github.com/prometheus/procfs/net_conntrackstat.go b/vendor/github.com/prometheus/procfs/net_conntrackstat.go new file mode 100644 index 000000000..b637be984 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_conntrackstat.go @@ -0,0 +1,153 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
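The per-operation Errors counter added to NFSOperationStats above can be read through the pre-existing Proc.MountStats/MountStatsNFS API; a sketch under that assumption (the counter simply stays zero on kernels that do not emit the ninth per-operation column):

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}
	self, err := fs.Self()
	if err != nil {
		log.Fatal(err)
	}

	// MountStats and MountStatsNFS are existing procfs APIs assumed unchanged here.
	mounts, err := self.MountStats()
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range mounts {
		nfs, ok := m.Stats.(*procfs.MountStatsNFS)
		if !ok {
			continue
		}
		for _, op := range nfs.Operations {
			fmt.Printf("%s: requests=%d errors=%d\n", op.Operation, op.Requests, op.Errors)
		}
	}
}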
+ +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// A ConntrackStatEntry represents one line from net/stat/nf_conntrack +// and contains netfilter conntrack statistics at one CPU core +type ConntrackStatEntry struct { + Entries uint64 + Found uint64 + Invalid uint64 + Ignore uint64 + Insert uint64 + InsertFailed uint64 + Drop uint64 + EarlyDrop uint64 + SearchRestart uint64 +} + +// ConntrackStat retrieves netfilter's conntrack statistics, split by CPU cores +func (fs FS) ConntrackStat() ([]ConntrackStatEntry, error) { + return readConntrackStat(fs.proc.Path("net", "stat", "nf_conntrack")) +} + +// Parses a slice of ConntrackStatEntries from the given filepath +func readConntrackStat(path string) ([]ConntrackStatEntry, error) { + // This file is small and can be read with one syscall. + b, err := util.ReadFileNoStat(path) + if err != nil { + // Do not wrap this error so the caller can detect os.IsNotExist and + // similar conditions. + return nil, err + } + + stat, err := parseConntrackStat(bytes.NewReader(b)) + if err != nil { + return nil, fmt.Errorf("failed to read conntrack stats from %q: %v", path, err) + } + + return stat, nil +} + +// Reads the contents of a conntrack statistics file and parses a slice of ConntrackStatEntries +func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) { + var entries []ConntrackStatEntry + + scanner := bufio.NewScanner(r) + scanner.Scan() + for scanner.Scan() { + fields := strings.Fields(scanner.Text()) + conntrackEntry, err := parseConntrackStatEntry(fields) + if err != nil { + return nil, err + } + entries = append(entries, *conntrackEntry) + } + + return entries, nil +} + +// Parses a ConntrackStatEntry from given array of fields +func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) { + if len(fields) != 17 { + return nil, fmt.Errorf("invalid conntrackstat entry, missing fields") + } + entry := &ConntrackStatEntry{} + + entries, err := parseConntrackStatField(fields[0]) + if err != nil { + return nil, err + } + entry.Entries = entries + + found, err := parseConntrackStatField(fields[2]) + if err != nil { + return nil, err + } + entry.Found = found + + invalid, err := parseConntrackStatField(fields[4]) + if err != nil { + return nil, err + } + entry.Invalid = invalid + + ignore, err := parseConntrackStatField(fields[5]) + if err != nil { + return nil, err + } + entry.Ignore = ignore + + insert, err := parseConntrackStatField(fields[8]) + if err != nil { + return nil, err + } + entry.Insert = insert + + insertFailed, err := parseConntrackStatField(fields[9]) + if err != nil { + return nil, err + } + entry.InsertFailed = insertFailed + + drop, err := parseConntrackStatField(fields[10]) + if err != nil { + return nil, err + } + entry.Drop = drop + + earlyDrop, err := parseConntrackStatField(fields[11]) + if err != nil { + return nil, err + } + entry.EarlyDrop = earlyDrop + + searchRestart, err := parseConntrackStatField(fields[16]) + if err != nil { + return nil, err + } + entry.SearchRestart = searchRestart + + return entry, nil +} + +// Parses a uint64 from given hex in string +func parseConntrackStatField(field string) (uint64, error) { + val, err := strconv.ParseUint(field, 16, 64) + if err != nil { + return 0, fmt.Errorf("couldn't parse \"%s\" field: %s", field, err) + } + return val, err +} diff --git a/vendor/github.com/prometheus/procfs/net_softnet.go b/vendor/github.com/prometheus/procfs/net_softnet.go index 
6fcad20af..db5debdf4 100644 --- a/vendor/github.com/prometheus/procfs/net_softnet.go +++ b/vendor/github.com/prometheus/procfs/net_softnet.go @@ -14,78 +14,89 @@ package procfs import ( + "bufio" + "bytes" "fmt" - "io/ioutil" + "io" "strconv" "strings" + + "github.com/prometheus/procfs/internal/util" ) // For the proc file format details, -// see https://elixir.bootlin.com/linux/v4.17/source/net/core/net-procfs.c#L162 +// See: +// * Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2343 +// * Linux 4.17 https://elixir.bootlin.com/linux/v4.17/source/net/core/net-procfs.c#L162 // and https://elixir.bootlin.com/linux/v4.17/source/include/linux/netdevice.h#L2810. -// SoftnetEntry contains a single row of data from /proc/net/softnet_stat -type SoftnetEntry struct { +// SoftnetStat contains a single row of data from /proc/net/softnet_stat +type SoftnetStat struct { // Number of processed packets - Processed uint + Processed uint32 // Number of dropped packets - Dropped uint + Dropped uint32 // Number of times processing packets ran out of quota - TimeSqueezed uint + TimeSqueezed uint32 } -// GatherSoftnetStats reads /proc/net/softnet_stat, parse the relevant columns, -// and then return a slice of SoftnetEntry's. -func (fs FS) GatherSoftnetStats() ([]SoftnetEntry, error) { - data, err := ioutil.ReadFile(fs.proc.Path("net/softnet_stat")) +var softNetProcFile = "net/softnet_stat" + +// NetSoftnetStat reads data from /proc/net/softnet_stat. +func (fs FS) NetSoftnetStat() ([]SoftnetStat, error) { + b, err := util.ReadFileNoStat(fs.proc.Path(softNetProcFile)) + if err != nil { + return nil, err + } + + entries, err := parseSoftnet(bytes.NewReader(b)) if err != nil { - return nil, fmt.Errorf("error reading softnet %s: %s", fs.proc.Path("net/softnet_stat"), err) + return nil, fmt.Errorf("failed to parse /proc/net/softnet_stat: %v", err) } - return parseSoftnetEntries(data) + return entries, nil } -func parseSoftnetEntries(data []byte) ([]SoftnetEntry, error) { - lines := strings.Split(string(data), "\n") - entries := make([]SoftnetEntry, 0) - var err error - const ( - expectedColumns = 11 - ) - for _, line := range lines { - columns := strings.Fields(line) +func parseSoftnet(r io.Reader) ([]SoftnetStat, error) { + const minColumns = 9 + + s := bufio.NewScanner(r) + + var stats []SoftnetStat + for s.Scan() { + columns := strings.Fields(s.Text()) width := len(columns) - if width == 0 { - continue - } - if width != expectedColumns { - return []SoftnetEntry{}, fmt.Errorf("%d columns were detected, but %d were expected", width, expectedColumns) + + if width < minColumns { + return nil, fmt.Errorf("%d columns were detected, but at least %d were expected", width, minColumns) } - var entry SoftnetEntry - if entry, err = parseSoftnetEntry(columns); err != nil { - return []SoftnetEntry{}, err + + // We only parse the first three columns at the moment. 
+ us, err := parseHexUint32s(columns[0:3]) + if err != nil { + return nil, err } - entries = append(entries, entry) + + stats = append(stats, SoftnetStat{ + Processed: us[0], + Dropped: us[1], + TimeSqueezed: us[2], + }) } - return entries, nil + return stats, nil } -func parseSoftnetEntry(columns []string) (SoftnetEntry, error) { - var err error - var processed, dropped, timeSqueezed uint64 - if processed, err = strconv.ParseUint(columns[0], 16, 32); err != nil { - return SoftnetEntry{}, fmt.Errorf("Unable to parse column 0: %s", err) - } - if dropped, err = strconv.ParseUint(columns[1], 16, 32); err != nil { - return SoftnetEntry{}, fmt.Errorf("Unable to parse column 1: %s", err) - } - if timeSqueezed, err = strconv.ParseUint(columns[2], 16, 32); err != nil { - return SoftnetEntry{}, fmt.Errorf("Unable to parse column 2: %s", err) +func parseHexUint32s(ss []string) ([]uint32, error) { + us := make([]uint32, 0, len(ss)) + for _, s := range ss { + u, err := strconv.ParseUint(s, 16, 32) + if err != nil { + return nil, err + } + + us = append(us, uint32(u)) } - return SoftnetEntry{ - Processed: uint(processed), - Dropped: uint(dropped), - TimeSqueezed: uint(timeSqueezed), - }, nil + + return us, nil } diff --git a/vendor/github.com/prometheus/procfs/net_udp.go b/vendor/github.com/prometheus/procfs/net_udp.go new file mode 100644 index 000000000..d017e3f18 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_udp.go @@ -0,0 +1,229 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "encoding/hex" + "fmt" + "io" + "net" + "os" + "strconv" + "strings" +) + +const ( + // readLimit is used by io.LimitReader while reading the content of the + // /proc/net/udp{,6} files. The number of lines inside such a file is dynamic + // as each line represents a single used socket. + // In theory, the number of available sockets is 65535 (2^16 - 1) per IP. + // With e.g. 150 Byte per line and the maximum number of 65535, + // the reader needs to handle 150 Byte * 65535 =~ 10 MB for a single IP. + readLimit = 4294967296 // Byte -> 4 GiB +) + +type ( + // NetUDP represents the contents of /proc/net/udp{,6} file without the header. + NetUDP []*netUDPLine + + // NetUDPSummary provides already computed values like the total queue lengths or + // the total number of used sockets. In contrast to NetUDP it does not collect + // the parsed lines into a slice. + NetUDPSummary struct { + // TxQueueLength shows the total queue length of all parsed tx_queue lengths. + TxQueueLength uint64 + // RxQueueLength shows the total queue length of all parsed rx_queue lengths. + RxQueueLength uint64 + // UsedSockets shows the total number of parsed lines representing the + // number of used sockets. + UsedSockets uint64 + } + + // netUDPLine represents the fields parsed from a single line + // in /proc/net/udp{,6}. Fields which are not used by UDP are skipped. + // For the proc file format details, see https://linux.die.net/man/5/proc. 
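A side-by-side sketch of the renamed softnet reader (GatherSoftnetStats becomes NetSoftnetStat) and the new conntrack reader — illustrative only; both need the corresponding files under /proc/net, and nf_conntrack statistics are only present when the conntrack module is loaded:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}

	// One SoftnetStat per CPU, parsed from /proc/net/softnet_stat.
	softnet, err := fs.NetSoftnetStat()
	if err != nil {
		log.Fatal(err)
	}
	for cpu, s := range softnet {
		fmt.Printf("cpu%d processed=%d dropped=%d squeezed=%d\n",
			cpu, s.Processed, s.Dropped, s.TimeSqueezed)
	}

	// One ConntrackStatEntry per CPU, parsed from /proc/net/stat/nf_conntrack.
	conntrack, err := fs.ConntrackStat()
	if err != nil {
		log.Fatal(err)
	}
	for cpu, c := range conntrack {
		fmt.Printf("cpu%d entries=%d drop=%d\n", cpu, c.Entries, c.Drop)
	}
}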
+ netUDPLine struct { + Sl uint64 + LocalAddr net.IP + LocalPort uint64 + RemAddr net.IP + RemPort uint64 + St uint64 + TxQueue uint64 + RxQueue uint64 + UID uint64 + } +) + +// NetUDP returns the IPv4 kernel/networking statistics for UDP datagrams +// read from /proc/net/udp. +func (fs FS) NetUDP() (NetUDP, error) { + return newNetUDP(fs.proc.Path("net/udp")) +} + +// NetUDP6 returns the IPv6 kernel/networking statistics for UDP datagrams +// read from /proc/net/udp6. +func (fs FS) NetUDP6() (NetUDP, error) { + return newNetUDP(fs.proc.Path("net/udp6")) +} + +// NetUDPSummary returns already computed statistics like the total queue lengths +// for UDP datagrams read from /proc/net/udp. +func (fs FS) NetUDPSummary() (*NetUDPSummary, error) { + return newNetUDPSummary(fs.proc.Path("net/udp")) +} + +// NetUDP6Summary returns already computed statistics like the total queue lengths +// for UDP datagrams read from /proc/net/udp6. +func (fs FS) NetUDP6Summary() (*NetUDPSummary, error) { + return newNetUDPSummary(fs.proc.Path("net/udp6")) +} + +// newNetUDP creates a new NetUDP{,6} from the contents of the given file. +func newNetUDP(file string) (NetUDP, error) { + f, err := os.Open(file) + if err != nil { + return nil, err + } + defer f.Close() + + netUDP := NetUDP{} + + lr := io.LimitReader(f, readLimit) + s := bufio.NewScanner(lr) + s.Scan() // skip first line with headers + for s.Scan() { + fields := strings.Fields(s.Text()) + line, err := parseNetUDPLine(fields) + if err != nil { + return nil, err + } + netUDP = append(netUDP, line) + } + if err := s.Err(); err != nil { + return nil, err + } + return netUDP, nil +} + +// newNetUDPSummary creates a new NetUDP{,6} from the contents of the given file. +func newNetUDPSummary(file string) (*NetUDPSummary, error) { + f, err := os.Open(file) + if err != nil { + return nil, err + } + defer f.Close() + + netUDPSummary := &NetUDPSummary{} + + lr := io.LimitReader(f, readLimit) + s := bufio.NewScanner(lr) + s.Scan() // skip first line with headers + for s.Scan() { + fields := strings.Fields(s.Text()) + line, err := parseNetUDPLine(fields) + if err != nil { + return nil, err + } + netUDPSummary.TxQueueLength += line.TxQueue + netUDPSummary.RxQueueLength += line.RxQueue + netUDPSummary.UsedSockets++ + } + if err := s.Err(); err != nil { + return nil, err + } + return netUDPSummary, nil +} + +// parseNetUDPLine parses a single line, represented by a list of fields. 
+func parseNetUDPLine(fields []string) (*netUDPLine, error) { + line := &netUDPLine{} + if len(fields) < 8 { + return nil, fmt.Errorf( + "cannot parse net udp socket line as it has less then 8 columns: %s", + strings.Join(fields, " "), + ) + } + var err error // parse error + + // sl + s := strings.Split(fields[0], ":") + if len(s) != 2 { + return nil, fmt.Errorf( + "cannot parse sl field in udp socket line: %s", fields[0]) + } + + if line.Sl, err = strconv.ParseUint(s[0], 0, 64); err != nil { + return nil, fmt.Errorf("cannot parse sl value in udp socket line: %s", err) + } + // local_address + l := strings.Split(fields[1], ":") + if len(l) != 2 { + return nil, fmt.Errorf( + "cannot parse local_address field in udp socket line: %s", fields[1]) + } + if line.LocalAddr, err = hex.DecodeString(l[0]); err != nil { + return nil, fmt.Errorf( + "cannot parse local_address value in udp socket line: %s", err) + } + if line.LocalPort, err = strconv.ParseUint(l[1], 16, 64); err != nil { + return nil, fmt.Errorf( + "cannot parse local_address port value in udp socket line: %s", err) + } + + // remote_address + r := strings.Split(fields[2], ":") + if len(r) != 2 { + return nil, fmt.Errorf( + "cannot parse rem_address field in udp socket line: %s", fields[1]) + } + if line.RemAddr, err = hex.DecodeString(r[0]); err != nil { + return nil, fmt.Errorf( + "cannot parse rem_address value in udp socket line: %s", err) + } + if line.RemPort, err = strconv.ParseUint(r[1], 16, 64); err != nil { + return nil, fmt.Errorf( + "cannot parse rem_address port value in udp socket line: %s", err) + } + + // st + if line.St, err = strconv.ParseUint(fields[3], 16, 64); err != nil { + return nil, fmt.Errorf( + "cannot parse st value in udp socket line: %s", err) + } + + // tx_queue and rx_queue + q := strings.Split(fields[4], ":") + if len(q) != 2 { + return nil, fmt.Errorf( + "cannot parse tx/rx queues in udp socket line as it has a missing colon: %s", + fields[4], + ) + } + if line.TxQueue, err = strconv.ParseUint(q[0], 16, 64); err != nil { + return nil, fmt.Errorf("cannot parse tx_queue value in udp socket line: %s", err) + } + if line.RxQueue, err = strconv.ParseUint(q[1], 16, 64); err != nil { + return nil, fmt.Errorf("cannot parse rx_queue value in udp socket line: %s", err) + } + + // uid + if line.UID, err = strconv.ParseUint(fields[7], 0, 64); err != nil { + return nil, fmt.Errorf( + "cannot parse uid value in udp socket line: %s", err) + } + + return line, nil +} diff --git a/vendor/github.com/prometheus/procfs/net_unix.go b/vendor/github.com/prometheus/procfs/net_unix.go index 93bd58f80..c55b4b18e 100644 --- a/vendor/github.com/prometheus/procfs/net_unix.go +++ b/vendor/github.com/prometheus/procfs/net_unix.go @@ -15,7 +15,6 @@ package procfs import ( "bufio" - "errors" "fmt" "io" "os" @@ -27,25 +26,15 @@ import ( // see https://elixir.bootlin.com/linux/v4.17/source/net/unix/af_unix.c#L2815 // and https://elixir.bootlin.com/linux/latest/source/include/uapi/linux/net.h#L48. -const ( - netUnixKernelPtrIdx = iota - netUnixRefCountIdx - _ - netUnixFlagsIdx - netUnixTypeIdx - netUnixStateIdx - netUnixInodeIdx - - // Inode and Path are optional. - netUnixStaticFieldsCnt = 6 -) - +// Constants for the various /proc/net/unix enumerations. +// TODO: match against x/sys/unix or similar? 
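Usage sketch for the new UDP readers (illustrative, assuming /proc/net/udp is readable); NetUDPSummary returns aggregate counters without keeping every parsed socket line in memory:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}

	// Aggregate view of /proc/net/udp.
	sum, err := fs.NetUDPSummary()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("udp sockets=%d tx_queue=%d rx_queue=%d\n",
		sum.UsedSockets, sum.TxQueueLength, sum.RxQueueLength)

	// Per-socket view; each entry is one parsed line of /proc/net/udp.
	udp, err := fs.NetUDP()
	if err != nil {
		log.Fatal(err)
	}
	for _, line := range udp {
		fmt.Printf("%s:%d uid=%d\n", line.LocalAddr, line.LocalPort, line.UID)
	}
}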
const ( netUnixTypeStream = 1 netUnixTypeDgram = 2 netUnixTypeSeqpacket = 5 - netUnixFlagListen = 1 << 16 + netUnixFlagDefault = 0 + netUnixFlagListen = 1 << 16 netUnixStateUnconnected = 1 netUnixStateConnecting = 2 @@ -53,129 +42,127 @@ const ( netUnixStateDisconnected = 4 ) -var errInvalidKernelPtrFmt = errors.New("Invalid Num(the kernel table slot number) format") +// NetUNIXType is the type of the type field. +type NetUNIXType uint64 -// NetUnixType is the type of the type field. -type NetUnixType uint64 +// NetUNIXFlags is the type of the flags field. +type NetUNIXFlags uint64 -// NetUnixFlags is the type of the flags field. -type NetUnixFlags uint64 +// NetUNIXState is the type of the state field. +type NetUNIXState uint64 -// NetUnixState is the type of the state field. -type NetUnixState uint64 - -// NetUnixLine represents a line of /proc/net/unix. -type NetUnixLine struct { +// NetUNIXLine represents a line of /proc/net/unix. +type NetUNIXLine struct { KernelPtr string RefCount uint64 Protocol uint64 - Flags NetUnixFlags - Type NetUnixType - State NetUnixState + Flags NetUNIXFlags + Type NetUNIXType + State NetUNIXState Inode uint64 Path string } -// NetUnix holds the data read from /proc/net/unix. -type NetUnix struct { - Rows []*NetUnixLine +// NetUNIX holds the data read from /proc/net/unix. +type NetUNIX struct { + Rows []*NetUNIXLine } -// NewNetUnix returns data read from /proc/net/unix. -func NewNetUnix() (*NetUnix, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return nil, err - } - - return fs.NewNetUnix() +// NetUNIX returns data read from /proc/net/unix. +func (fs FS) NetUNIX() (*NetUNIX, error) { + return readNetUNIX(fs.proc.Path("net/unix")) } -// NewNetUnix returns data read from /proc/net/unix. -func (fs FS) NewNetUnix() (*NetUnix, error) { - return NewNetUnixByPath(fs.proc.Path("net/unix")) -} - -// NewNetUnixByPath returns data read from /proc/net/unix by file path. -// It might returns an error with partial parsed data, if an error occur after some data parsed. -func NewNetUnixByPath(path string) (*NetUnix, error) { - f, err := os.Open(path) +// readNetUNIX reads data in /proc/net/unix format from the specified file. +func readNetUNIX(file string) (*NetUNIX, error) { + // This file could be quite large and a streaming read is desirable versus + // reading the entire contents at once. + f, err := os.Open(file) if err != nil { return nil, err } defer f.Close() - return NewNetUnixByReader(f) + + return parseNetUNIX(f) } -// NewNetUnixByReader returns data read from /proc/net/unix by a reader. -// It might returns an error with partial parsed data, if an error occur after some data parsed. -func NewNetUnixByReader(reader io.Reader) (*NetUnix, error) { - nu := &NetUnix{ - Rows: make([]*NetUnixLine, 0, 32), - } - scanner := bufio.NewScanner(reader) - // Omit the header line. - scanner.Scan() - header := scanner.Text() +// parseNetUNIX creates a NetUnix structure from the incoming stream. +func parseNetUNIX(r io.Reader) (*NetUNIX, error) { + // Begin scanning by checking for the existence of Inode. + s := bufio.NewScanner(r) + s.Scan() + // From the man page of proc(5), it does not contain an Inode field, - // but in actually it exists. - // This code works for both cases. - hasInode := strings.Contains(header, "Inode") + // but in actually it exists. This code works for both cases. 
+ hasInode := strings.Contains(s.Text(), "Inode") - minFieldsCnt := netUnixStaticFieldsCnt + // Expect a minimum number of fields, but Inode and Path are optional: + // Num RefCount Protocol Flags Type St Inode Path + minFields := 6 if hasInode { - minFieldsCnt++ + minFields++ } - for scanner.Scan() { - line := scanner.Text() - item, err := nu.parseLine(line, hasInode, minFieldsCnt) + + var nu NetUNIX + for s.Scan() { + line := s.Text() + item, err := nu.parseLine(line, hasInode, minFields) if err != nil { - return nu, err + return nil, fmt.Errorf("failed to parse /proc/net/unix data %q: %v", line, err) } + nu.Rows = append(nu.Rows, item) } - return nu, scanner.Err() + if err := s.Err(); err != nil { + return nil, fmt.Errorf("failed to scan /proc/net/unix data: %v", err) + } + + return &nu, nil } -func (u *NetUnix) parseLine(line string, hasInode bool, minFieldsCnt int) (*NetUnixLine, error) { +func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine, error) { fields := strings.Fields(line) - fieldsLen := len(fields) - if fieldsLen < minFieldsCnt { - return nil, fmt.Errorf( - "Parse Unix domain failed: expect at least %d fields but got %d", - minFieldsCnt, fieldsLen) - } - kernelPtr, err := u.parseKernelPtr(fields[netUnixKernelPtrIdx]) - if err != nil { - return nil, fmt.Errorf("Parse Unix domain num(%s) failed: %s", fields[netUnixKernelPtrIdx], err) + + l := len(fields) + if l < min { + return nil, fmt.Errorf("expected at least %d fields but got %d", min, l) } - users, err := u.parseUsers(fields[netUnixRefCountIdx]) + + // Field offsets are as follows: + // Num RefCount Protocol Flags Type St Inode Path + + kernelPtr := strings.TrimSuffix(fields[0], ":") + + users, err := u.parseUsers(fields[1]) if err != nil { - return nil, fmt.Errorf("Parse Unix domain ref count(%s) failed: %s", fields[netUnixRefCountIdx], err) + return nil, fmt.Errorf("failed to parse ref count(%s): %v", fields[1], err) } - flags, err := u.parseFlags(fields[netUnixFlagsIdx]) + + flags, err := u.parseFlags(fields[3]) if err != nil { - return nil, fmt.Errorf("Parse Unix domain flags(%s) failed: %s", fields[netUnixFlagsIdx], err) + return nil, fmt.Errorf("failed to parse flags(%s): %v", fields[3], err) } - typ, err := u.parseType(fields[netUnixTypeIdx]) + + typ, err := u.parseType(fields[4]) if err != nil { - return nil, fmt.Errorf("Parse Unix domain type(%s) failed: %s", fields[netUnixTypeIdx], err) + return nil, fmt.Errorf("failed to parse type(%s): %v", fields[4], err) } - state, err := u.parseState(fields[netUnixStateIdx]) + + state, err := u.parseState(fields[5]) if err != nil { - return nil, fmt.Errorf("Parse Unix domain state(%s) failed: %s", fields[netUnixStateIdx], err) + return nil, fmt.Errorf("failed to parse state(%s): %v", fields[5], err) } + var inode uint64 if hasInode { - inodeStr := fields[netUnixInodeIdx] - inode, err = u.parseInode(inodeStr) + inode, err = u.parseInode(fields[6]) if err != nil { - return nil, fmt.Errorf("Parse Unix domain inode(%s) failed: %s", inodeStr, err) + return nil, fmt.Errorf("failed to parse inode(%s): %v", fields[6], err) } } - nuLine := &NetUnixLine{ + n := &NetUNIXLine{ KernelPtr: kernelPtr, RefCount: users, Type: typ, @@ -185,57 +172,56 @@ func (u *NetUnix) parseLine(line string, hasInode bool, minFieldsCnt int) (*NetU } // Path field is optional. - if fieldsLen > minFieldsCnt { - pathIdx := netUnixInodeIdx + 1 + if l > min { + // Path occurs at either index 6 or 7 depending on whether inode is + // already present. 
+ pathIdx := 7 if !hasInode { pathIdx-- } - nuLine.Path = fields[pathIdx] - } - - return nuLine, nil -} -func (u NetUnix) parseKernelPtr(str string) (string, error) { - if !strings.HasSuffix(str, ":") { - return "", errInvalidKernelPtrFmt + n.Path = fields[pathIdx] } - return str[:len(str)-1], nil + + return n, nil } -func (u NetUnix) parseUsers(hexStr string) (uint64, error) { - return strconv.ParseUint(hexStr, 16, 32) +func (u NetUNIX) parseUsers(s string) (uint64, error) { + return strconv.ParseUint(s, 16, 32) } -func (u NetUnix) parseType(hexStr string) (NetUnixType, error) { - typ, err := strconv.ParseUint(hexStr, 16, 16) +func (u NetUNIX) parseType(s string) (NetUNIXType, error) { + typ, err := strconv.ParseUint(s, 16, 16) if err != nil { return 0, err } - return NetUnixType(typ), nil + + return NetUNIXType(typ), nil } -func (u NetUnix) parseFlags(hexStr string) (NetUnixFlags, error) { - flags, err := strconv.ParseUint(hexStr, 16, 32) +func (u NetUNIX) parseFlags(s string) (NetUNIXFlags, error) { + flags, err := strconv.ParseUint(s, 16, 32) if err != nil { return 0, err } - return NetUnixFlags(flags), nil + + return NetUNIXFlags(flags), nil } -func (u NetUnix) parseState(hexStr string) (NetUnixState, error) { - st, err := strconv.ParseInt(hexStr, 16, 8) +func (u NetUNIX) parseState(s string) (NetUNIXState, error) { + st, err := strconv.ParseInt(s, 16, 8) if err != nil { return 0, err } - return NetUnixState(st), nil + + return NetUNIXState(st), nil } -func (u NetUnix) parseInode(inodeStr string) (uint64, error) { - return strconv.ParseUint(inodeStr, 10, 64) +func (u NetUNIX) parseInode(s string) (uint64, error) { + return strconv.ParseUint(s, 10, 64) } -func (t NetUnixType) String() string { +func (t NetUNIXType) String() string { switch t { case netUnixTypeStream: return "stream" @@ -247,7 +233,7 @@ func (t NetUnixType) String() string { return "unknown" } -func (f NetUnixFlags) String() string { +func (f NetUNIXFlags) String() string { switch f { case netUnixFlagListen: return "listen" @@ -256,7 +242,7 @@ func (f NetUnixFlags) String() string { } } -func (s NetUnixState) String() string { +func (s NetUNIXState) String() string { switch s { case netUnixStateUnconnected: return "unconnected" diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go index 330e472c7..9f97b6e52 100644 --- a/vendor/github.com/prometheus/procfs/proc.go +++ b/vendor/github.com/prometheus/procfs/proc.go @@ -134,6 +134,27 @@ func (p Proc) CmdLine() ([]string, error) { return strings.Split(string(bytes.TrimRight(data, string("\x00"))), string(byte(0))), nil } +// Wchan returns the wchan (wait channel) of a process. +func (p Proc) Wchan() (string, error) { + f, err := os.Open(p.path("wchan")) + if err != nil { + return "", err + } + defer f.Close() + + data, err := ioutil.ReadAll(f) + if err != nil { + return "", err + } + + wchan := string(data) + if wchan == "" || wchan == "0" { + return "", nil + } + + return wchan, nil +} + // Comm returns the command name of a process. 
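The renamed NetUNIX API and the new Proc.Wchan helper can be combined in a short sketch — illustrative only, and it assumes the package's existing FS.Self helper for addressing the current process:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}

	// /proc/net/unix, with the renamed NetUNIX types.
	sockets, err := fs.NetUNIX()
	if err != nil {
		log.Fatal(err)
	}
	for _, row := range sockets.Rows {
		fmt.Printf("%s type=%s inode=%d path=%q\n",
			row.KernelPtr, row.Type, row.Inode, row.Path)
	}

	// Wait channel of the current process; an empty string means the
	// process is not blocked in the kernel (wchan is "" or "0").
	self, err := fs.Self()
	if err != nil {
		log.Fatal(err)
	}
	wchan, err := self.Wchan()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("wchan:", wchan)
}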
func (p Proc) Comm() (string, error) { data, err := util.ReadFileNoStat(p.path("comm")) diff --git a/vendor/github.com/prometheus/procfs/proc_cgroup.go b/vendor/github.com/prometheus/procfs/proc_cgroup.go new file mode 100644 index 000000000..4abd46451 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_cgroup.go @@ -0,0 +1,98 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the the placement of a PID inside a +// specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. v1 has one hierarchy per available resource +// controller, while v2 has one unified hierarchy shared by all controllers. Regardless of v1 or v2, all hierarchies +// contain all running processes, so the question answerable with a Cgroup struct is 'where is this process in +// this hierarchy' (where==what path on the specific cgroupfs). By prefixing this path with the mount point of +// *this specific* hierarchy, you can locate the relevant pseudo-files needed to read/set the data for this PID +// in this hierarchy +// +// Also see http://man7.org/linux/man-pages/man7/cgroups.7.html +type Cgroup struct { + // HierarchyID that can be matched to a named hierarchy using /proc/cgroups. Cgroups V2 only has one + // hierarchy, so HierarchyID is always 0. For cgroups v1 this is a unique ID number + HierarchyID int + // Controllers using this hierarchy of processes. Controllers are also known as subsystems. For + // Cgroups V2 this may be empty, as all active controllers use the same hierarchy + Controllers []string + // Path of this control group, relative to the mount point of the cgroupfs representing this specific + // hierarchy + Path string +} + +// parseCgroupString parses each line of the /proc/[pid]/cgroup file +// Line format is hierarchyID:[controller1,controller2]:path +func parseCgroupString(cgroupStr string) (*Cgroup, error) { + var err error + + fields := strings.Split(cgroupStr, ":") + if len(fields) < 3 { + return nil, fmt.Errorf("at least 3 fields required, found %d fields in cgroup string: %s", len(fields), cgroupStr) + } + + cgroup := &Cgroup{ + Path: fields[2], + Controllers: nil, + } + cgroup.HierarchyID, err = strconv.Atoi(fields[0]) + if err != nil { + return nil, fmt.Errorf("failed to parse hierarchy ID") + } + if fields[1] != "" { + ssNames := strings.Split(fields[1], ",") + cgroup.Controllers = append(cgroup.Controllers, ssNames...) 
+ } + return cgroup, nil +} + +// parseCgroups reads each line of the /proc/[pid]/cgroup file +func parseCgroups(data []byte) ([]Cgroup, error) { + var cgroups []Cgroup + scanner := bufio.NewScanner(bytes.NewReader(data)) + for scanner.Scan() { + mountString := scanner.Text() + parsedMounts, err := parseCgroupString(mountString) + if err != nil { + return nil, err + } + cgroups = append(cgroups, *parsedMounts) + } + + err := scanner.Err() + return cgroups, err +} + +// Cgroups reads from /proc//cgroups and returns a []*Cgroup struct locating this PID in each process +// control hierarchy running on this system. On every system (v1 and v2), all hierarchies contain all processes, +// so the len of the returned struct is equal to the number of active hierarchies on this system +func (p Proc) Cgroups() ([]Cgroup, error) { + data, err := util.ReadFileNoStat(fmt.Sprintf("/proc/%d/cgroup", p.PID)) + if err != nil { + return nil, err + } + return parseCgroups(data) +} diff --git a/vendor/github.com/prometheus/procfs/proc_fdinfo.go b/vendor/github.com/prometheus/procfs/proc_fdinfo.go index 4e7597f86..a76ca7079 100644 --- a/vendor/github.com/prometheus/procfs/proc_fdinfo.go +++ b/vendor/github.com/prometheus/procfs/proc_fdinfo.go @@ -16,6 +16,7 @@ package procfs import ( "bufio" "bytes" + "errors" "regexp" "github.com/prometheus/procfs/internal/util" @@ -23,10 +24,11 @@ import ( // Regexp variables var ( - rPos = regexp.MustCompile(`^pos:\s+(\d+)$`) - rFlags = regexp.MustCompile(`^flags:\s+(\d+)$`) - rMntID = regexp.MustCompile(`^mnt_id:\s+(\d+)$`) - rInotify = regexp.MustCompile(`^inotify`) + rPos = regexp.MustCompile(`^pos:\s+(\d+)$`) + rFlags = regexp.MustCompile(`^flags:\s+(\d+)$`) + rMntID = regexp.MustCompile(`^mnt_id:\s+(\d+)$`) + rInotify = regexp.MustCompile(`^inotify`) + rInotifyParts = regexp.MustCompile(`^inotify\s+wd:([0-9a-f]+)\s+ino:([0-9a-f]+)\s+sdev:([0-9a-f]+)(?:\s+mask:([0-9a-f]+))?`) ) // ProcFDInfo contains represents file descriptor information. @@ -39,7 +41,7 @@ type ProcFDInfo struct { Flags string // Mount point ID MntID string - // List of inotify lines (structed) in the fdinfo file (kernel 3.8+ only) + // List of inotify lines (structured) in the fdinfo file (kernel 3.8+ only) InotifyInfos []InotifyInfo } @@ -96,15 +98,21 @@ type InotifyInfo struct { // InotifyInfo constructor. Only available on kernel 3.8+. func parseInotifyInfo(line string) (*InotifyInfo, error) { - r := regexp.MustCompile(`^inotify\s+wd:([0-9a-f]+)\s+ino:([0-9a-f]+)\s+sdev:([0-9a-f]+)\s+mask:([0-9a-f]+)`) - m := r.FindStringSubmatch(line) - i := &InotifyInfo{ - WD: m[1], - Ino: m[2], - Sdev: m[3], - Mask: m[4], + m := rInotifyParts.FindStringSubmatch(line) + if len(m) >= 4 { + var mask string + if len(m) == 5 { + mask = m[4] + } + i := &InotifyInfo{ + WD: m[1], + Ino: m[2], + Sdev: m[3], + Mask: mask, + } + return i, nil } - return i, nil + return nil, errors.New("invalid inode entry: " + line) } // ProcFDInfos represents a list of ProcFDInfo structs. diff --git a/vendor/github.com/prometheus/procfs/proc_maps.go b/vendor/github.com/prometheus/procfs/proc_maps.go new file mode 100644 index 000000000..1d7772d51 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_maps.go @@ -0,0 +1,209 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
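Usage sketch for the new Proc.Cgroups reader (illustrative; FS.Self is an existing procfs helper assumed here), which returns one entry per hierarchy listed in /proc/<pid>/cgroup on both cgroup v1 and v2:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}
	self, err := fs.Self()
	if err != nil {
		log.Fatal(err)
	}

	// One Cgroup per hierarchy; on cgroup v2 there is a single entry
	// with HierarchyID 0 and an empty controller list.
	cgroups, err := self.Cgroups()
	if err != nil {
		log.Fatal(err)
	}
	for _, cg := range cgroups {
		fmt.Printf("hierarchy=%d controllers=%v path=%s\n",
			cg.HierarchyID, cg.Controllers, cg.Path)
	}
}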
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris + +package procfs + +import ( + "bufio" + "fmt" + "os" + "strconv" + "strings" + + "golang.org/x/sys/unix" +) + +// ProcMapPermissions contains permission settings read from /proc/[pid]/maps +type ProcMapPermissions struct { + // mapping has the [R]ead flag set + Read bool + // mapping has the [W]rite flag set + Write bool + // mapping has the [X]ecutable flag set + Execute bool + // mapping has the [S]hared flag set + Shared bool + // mapping is marked as [P]rivate (copy on write) + Private bool +} + +// ProcMap contains the process memory-mappings of the process, +// read from /proc/[pid]/maps +type ProcMap struct { + // The start address of current mapping. + StartAddr uintptr + // The end address of the current mapping + EndAddr uintptr + // The permissions for this mapping + Perms *ProcMapPermissions + // The current offset into the file/fd (e.g., shared libs) + Offset int64 + // Device owner of this mapping (major:minor) in Mkdev format. + Dev uint64 + // The inode of the device above + Inode uint64 + // The file or psuedofile (or empty==anonymous) + Pathname string +} + +// parseDevice parses the device token of a line and converts it to a dev_t +// (mkdev) like structure. +func parseDevice(s string) (uint64, error) { + toks := strings.Split(s, ":") + if len(toks) < 2 { + return 0, fmt.Errorf("unexpected number of fields") + } + + major, err := strconv.ParseUint(toks[0], 16, 0) + if err != nil { + return 0, err + } + + minor, err := strconv.ParseUint(toks[1], 16, 0) + if err != nil { + return 0, err + } + + return unix.Mkdev(uint32(major), uint32(minor)), nil +} + +// parseAddress just converts a hex-string to a uintptr +func parseAddress(s string) (uintptr, error) { + a, err := strconv.ParseUint(s, 16, 0) + if err != nil { + return 0, err + } + + return uintptr(a), nil +} + +// parseAddresses parses the start-end address +func parseAddresses(s string) (uintptr, uintptr, error) { + toks := strings.Split(s, "-") + if len(toks) < 2 { + return 0, 0, fmt.Errorf("invalid address") + } + + saddr, err := parseAddress(toks[0]) + if err != nil { + return 0, 0, err + } + + eaddr, err := parseAddress(toks[1]) + if err != nil { + return 0, 0, err + } + + return saddr, eaddr, nil +} + +// parsePermissions parses a token and returns any that are set. +func parsePermissions(s string) (*ProcMapPermissions, error) { + if len(s) < 4 { + return nil, fmt.Errorf("invalid permissions token") + } + + perms := ProcMapPermissions{} + for _, ch := range s { + switch ch { + case 'r': + perms.Read = true + case 'w': + perms.Write = true + case 'x': + perms.Execute = true + case 'p': + perms.Private = true + case 's': + perms.Shared = true + } + } + + return &perms, nil +} + +// parseProcMap will attempt to parse a single line within a proc/[pid]/maps +// buffer. 
+func parseProcMap(text string) (*ProcMap, error) { + fields := strings.Fields(text) + if len(fields) < 5 { + return nil, fmt.Errorf("truncated procmap entry") + } + + saddr, eaddr, err := parseAddresses(fields[0]) + if err != nil { + return nil, err + } + + perms, err := parsePermissions(fields[1]) + if err != nil { + return nil, err + } + + offset, err := strconv.ParseInt(fields[2], 16, 0) + if err != nil { + return nil, err + } + + device, err := parseDevice(fields[3]) + if err != nil { + return nil, err + } + + inode, err := strconv.ParseUint(fields[4], 10, 0) + if err != nil { + return nil, err + } + + pathname := "" + + if len(fields) >= 5 { + pathname = strings.Join(fields[5:], " ") + } + + return &ProcMap{ + StartAddr: saddr, + EndAddr: eaddr, + Perms: perms, + Offset: offset, + Dev: device, + Inode: inode, + Pathname: pathname, + }, nil +} + +// ProcMaps reads from /proc/[pid]/maps to get the memory-mappings of the +// process. +func (p Proc) ProcMaps() ([]*ProcMap, error) { + file, err := os.Open(p.path("maps")) + if err != nil { + return nil, err + } + defer file.Close() + + maps := []*ProcMap{} + scan := bufio.NewScanner(file) + + for scan.Scan() { + m, err := parseProcMap(scan.Text()) + if err != nil { + return nil, err + } + + maps = append(maps, m) + } + + return maps, nil +} diff --git a/vendor/github.com/prometheus/procfs/proc_smaps.go b/vendor/github.com/prometheus/procfs/proc_smaps.go new file mode 100644 index 000000000..a576a720a --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_smaps.go @@ -0,0 +1,165 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !windows + +package procfs + +import ( + "bufio" + "errors" + "fmt" + "os" + "regexp" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +var ( + // match the header line before each mapped zone in /proc/pid/smaps + procSMapsHeaderLine = regexp.MustCompile(`^[a-f0-9].*$`) +) + +type ProcSMapsRollup struct { + // Amount of the mapping that is currently resident in RAM + Rss uint64 + // Process's proportional share of this mapping + Pss uint64 + // Size in bytes of clean shared pages + SharedClean uint64 + // Size in bytes of dirty shared pages + SharedDirty uint64 + // Size in bytes of clean private pages + PrivateClean uint64 + // Size in bytes of dirty private pages + PrivateDirty uint64 + // Amount of memory currently marked as referenced or accessed + Referenced uint64 + // Amount of memory that does not belong to any file + Anonymous uint64 + // Amount would-be-anonymous memory currently on swap + Swap uint64 + // Process's proportional memory on swap + SwapPss uint64 +} + +// ProcSMapsRollup reads from /proc/[pid]/smaps_rollup to get summed memory information of the +// process. +// +// If smaps_rollup does not exists (require kernel >= 4.15), the content of /proc/pid/smaps will +// we read and summed. 
+func (p Proc) ProcSMapsRollup() (ProcSMapsRollup, error) { + data, err := util.ReadFileNoStat(p.path("smaps_rollup")) + if err != nil && os.IsNotExist(err) { + return p.procSMapsRollupManual() + } + if err != nil { + return ProcSMapsRollup{}, err + } + + lines := strings.Split(string(data), "\n") + smaps := ProcSMapsRollup{} + + // skip first line which don't contains information we need + lines = lines[1:] + for _, line := range lines { + if line == "" { + continue + } + + if err := smaps.parseLine(line); err != nil { + return ProcSMapsRollup{}, err + } + } + + return smaps, nil +} + +// Read /proc/pid/smaps and do the roll-up in Go code. +func (p Proc) procSMapsRollupManual() (ProcSMapsRollup, error) { + file, err := os.Open(p.path("smaps")) + if err != nil { + return ProcSMapsRollup{}, err + } + defer file.Close() + + smaps := ProcSMapsRollup{} + scan := bufio.NewScanner(file) + + for scan.Scan() { + line := scan.Text() + + if procSMapsHeaderLine.MatchString(line) { + continue + } + + if err := smaps.parseLine(line); err != nil { + return ProcSMapsRollup{}, err + } + } + + return smaps, nil +} + +func (s *ProcSMapsRollup) parseLine(line string) error { + kv := strings.SplitN(line, ":", 2) + if len(kv) != 2 { + fmt.Println(line) + return errors.New("invalid net/dev line, missing colon") + } + + k := kv[0] + if k == "VmFlags" { + return nil + } + + v := strings.TrimSpace(kv[1]) + v = strings.TrimRight(v, " kB") + + vKBytes, err := strconv.ParseUint(v, 10, 64) + if err != nil { + return err + } + vBytes := vKBytes * 1024 + + s.addValue(k, v, vKBytes, vBytes) + + return nil +} + +func (s *ProcSMapsRollup) addValue(k string, vString string, vUint uint64, vUintBytes uint64) { + switch k { + case "Rss": + s.Rss += vUintBytes + case "Pss": + s.Pss += vUintBytes + case "Shared_Clean": + s.SharedClean += vUintBytes + case "Shared_Dirty": + s.SharedDirty += vUintBytes + case "Private_Clean": + s.PrivateClean += vUintBytes + case "Private_Dirty": + s.PrivateDirty += vUintBytes + case "Referenced": + s.Referenced += vUintBytes + case "Anonymous": + s.Anonymous += vUintBytes + case "Swap": + s.Swap += vUintBytes + case "SwapPss": + s.SwapPss += vUintBytes + } +} diff --git a/vendor/github.com/prometheus/procfs/proc_status.go b/vendor/github.com/prometheus/procfs/proc_status.go index e30c2b88f..c58346d91 100644 --- a/vendor/github.com/prometheus/procfs/proc_status.go +++ b/vendor/github.com/prometheus/procfs/proc_status.go @@ -33,37 +33,37 @@ type ProcStatus struct { TGID int // Peak virtual memory size. - VmPeak uint64 + VmPeak uint64 // nolint:golint // Virtual memory size. - VmSize uint64 + VmSize uint64 // nolint:golint // Locked memory size. - VmLck uint64 + VmLck uint64 // nolint:golint // Pinned memory size. - VmPin uint64 + VmPin uint64 // nolint:golint // Peak resident set size. - VmHWM uint64 + VmHWM uint64 // nolint:golint // Resident set size (sum of RssAnnon RssFile and RssShmem). - VmRSS uint64 + VmRSS uint64 // nolint:golint // Size of resident anonymous memory. - RssAnon uint64 + RssAnon uint64 // nolint:golint // Size of resident file mappings. - RssFile uint64 + RssFile uint64 // nolint:golint // Size of resident shared memory. - RssShmem uint64 + RssShmem uint64 // nolint:golint // Size of data segments. - VmData uint64 + VmData uint64 // nolint:golint // Size of stack segments. - VmStk uint64 + VmStk uint64 // nolint:golint // Size of text segments. - VmExe uint64 + VmExe uint64 // nolint:golint // Shared library code size. 
- VmLib uint64 + VmLib uint64 // nolint:golint // Page table entries size. - VmPTE uint64 + VmPTE uint64 // nolint:golint // Size of second-level page tables. - VmPMD uint64 + VmPMD uint64 // nolint:golint // Swapped-out virtual memory size by anonymous private. - VmSwap uint64 + VmSwap uint64 // nolint:golint // Size of hugetlb memory portions HugetlbPages uint64 @@ -71,6 +71,9 @@ type ProcStatus struct { VoluntaryCtxtSwitches uint64 // Number of involuntary context switches. NonVoluntaryCtxtSwitches uint64 + + // UIDs of the process (Real, effective, saved set, and filesystem UIDs (GIDs)) + UIDs [4]string } // NewStatus returns the current status information of the process. @@ -114,6 +117,8 @@ func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintByt s.TGID = int(vUint) case "Name": s.Name = vString + case "Uid": + copy(s.UIDs[:], strings.Split(vString, "\t")) case "VmPeak": s.VmPeak = vUintBytes case "VmSize": diff --git a/vendor/github.com/prometheus/procfs/swaps.go b/vendor/github.com/prometheus/procfs/swaps.go new file mode 100644 index 000000000..15edc2212 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/swaps.go @@ -0,0 +1,89 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// Swap represents an entry in /proc/swaps. +type Swap struct { + Filename string + Type string + Size int + Used int + Priority int +} + +// Swaps returns a slice of all configured swap devices on the system. 
+func (fs FS) Swaps() ([]*Swap, error) { + data, err := util.ReadFileNoStat(fs.proc.Path("swaps")) + if err != nil { + return nil, err + } + return parseSwaps(data) +} + +func parseSwaps(info []byte) ([]*Swap, error) { + swaps := []*Swap{} + scanner := bufio.NewScanner(bytes.NewReader(info)) + scanner.Scan() // ignore header line + for scanner.Scan() { + swapString := scanner.Text() + parsedSwap, err := parseSwapString(swapString) + if err != nil { + return nil, err + } + swaps = append(swaps, parsedSwap) + } + + err := scanner.Err() + return swaps, err +} + +func parseSwapString(swapString string) (*Swap, error) { + var err error + + swapFields := strings.Fields(swapString) + swapLength := len(swapFields) + if swapLength < 5 { + return nil, fmt.Errorf("too few fields in swap string: %s", swapString) + } + + swap := &Swap{ + Filename: swapFields[0], + Type: swapFields[1], + } + + swap.Size, err = strconv.Atoi(swapFields[2]) + if err != nil { + return nil, fmt.Errorf("invalid swap size: %s", swapFields[2]) + } + swap.Used, err = strconv.Atoi(swapFields[3]) + if err != nil { + return nil, fmt.Errorf("invalid swap used: %s", swapFields[3]) + } + swap.Priority, err = strconv.Atoi(swapFields[4]) + if err != nil { + return nil, fmt.Errorf("invalid swap priority: %s", swapFields[4]) + } + + return swap, nil +} diff --git a/vendor/github.com/sirupsen/logrus/.golangci.yml b/vendor/github.com/sirupsen/logrus/.golangci.yml new file mode 100644 index 000000000..65dc28503 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/.golangci.yml @@ -0,0 +1,40 @@ +run: + # do not run on test files yet + tests: false + +# all available settings of specific linters +linters-settings: + errcheck: + # report about not checking of errors in type assetions: `a := b.(MyStruct)`; + # default is false: such cases aren't reported by default. + check-type-assertions: false + + # report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`; + # default is false: such cases aren't reported by default. + check-blank: false + + lll: + line-length: 100 + tab-width: 4 + + prealloc: + simple: false + range-loops: false + for-loops: false + + whitespace: + multi-if: false # Enforces newlines (or comments) after every multi-line if statement + multi-func: false # Enforces newlines (or comments) after every multi-line function signature + +linters: + enable: + - megacheck + - govet + disable: + - maligned + - prealloc + disable-all: false + presets: + - bugs + - unused + fast: false diff --git a/vendor/github.com/sirupsen/logrus/.travis.yml b/vendor/github.com/sirupsen/logrus/.travis.yml index 848938a6d..5e20aa414 100644 --- a/vendor/github.com/sirupsen/logrus/.travis.yml +++ b/vendor/github.com/sirupsen/logrus/.travis.yml @@ -4,21 +4,13 @@ git: depth: 1 env: - GO111MODULE=on - - GO111MODULE=off -go: [ 1.11.x, 1.12.x ] -os: [ linux, osx ] -matrix: - exclude: - - go: 1.12.x - env: GO111MODULE=off - - go: 1.11.x - os: osx +go: [1.13.x, 1.14.x] +os: [linux, osx] install: - ./travis/install.sh - - if [[ "$GO111MODULE" == "on" ]]; then go mod download; fi - - if [[ "$GO111MODULE" == "off" ]]; then go get github.com/stretchr/testify/assert golang.org/x/sys/unix github.com/konsorten/go-windows-terminal-sequences; fi script: - ./travis/cross_build.sh + - ./travis/lint.sh - export GOMAXPROCS=4 - export GORACE=halt_on_error=1 - go test -race -v ./... 
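The procfs additions above (`Proc.Cgroups`, `Proc.ProcMaps`, `Proc.ProcSMapsRollup`, `FS.Swaps`, and the new `UIDs` field on `ProcStatus`) are only pulled in transitively by this dependency bump, so here is a rough, Linux-only usage sketch. It assumes `procfs.NewDefaultFS`, `fs.Self` and `Proc.NewStatus` from the existing procfs API (not part of these hunks); error handling is minimal and all printed values are illustrative.

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS() // defaults to /proc
	if err != nil {
		log.Fatal(err)
	}

	self, err := fs.Self() // the current process
	if err != nil {
		log.Fatal(err)
	}

	// New in this procfs version: cgroup membership of the process.
	if cgroups, err := self.Cgroups(); err == nil {
		fmt.Printf("process is in %d cgroup hierarchies\n", len(cgroups))
	}

	// New: memory mappings read from /proc/self/maps.
	if maps, err := self.ProcMaps(); err == nil {
		for _, m := range maps {
			if m.Pathname != "" {
				fmt.Printf("%#x-%#x %s\n", m.StartAddr, m.EndAddr, m.Pathname)
			}
		}
	}

	// New: summed smaps data; falls back to a manual roll-up on kernels
	// without /proc/[pid]/smaps_rollup.
	if rollup, err := self.ProcSMapsRollup(); err == nil {
		fmt.Printf("RSS=%d bytes PSS=%d bytes\n", rollup.Rss, rollup.Pss)
	}

	// New: the Uid line of /proc/[pid]/status is now exposed as UIDs.
	if status, err := self.NewStatus(); err == nil {
		fmt.Printf("real/effective/saved/fs UIDs: %v\n", status.UIDs)
	}

	// New: configured swap devices from /proc/swaps.
	if swaps, err := fs.Swaps(); err == nil {
		for _, s := range swaps {
			fmt.Printf("swap %s: used %d of %d\n", s.Filename, s.Used, s.Size)
		}
	}
}
```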
diff --git a/vendor/github.com/sirupsen/logrus/CHANGELOG.md b/vendor/github.com/sirupsen/logrus/CHANGELOG.md index 51a7ab0ca..584026d67 100644 --- a/vendor/github.com/sirupsen/logrus/CHANGELOG.md +++ b/vendor/github.com/sirupsen/logrus/CHANGELOG.md @@ -1,9 +1,32 @@ +# 1.6.0 +Fixes: + * end of line cleanup + * revert the entry concurrency bug fix whic leads to deadlock under some circumstances + * update dependency on go-windows-terminal-sequences to fix a crash with go 1.14 + +Features: + * add an option to the `TextFormatter` to completely disable fields quoting + +# 1.5.0 +Code quality: + * add golangci linter run on travis + +Fixes: + * add mutex for hooks concurrent access on `Entry` data + * caller function field for go1.14 + * fix build issue for gopherjs target + +Feature: + * add an hooks/writer sub-package whose goal is to split output on different stream depending on the trace level + * add a `DisableHTMLEscape` option in the `JSONFormatter` + * add `ForceQuote` and `PadLevelText` options in the `TextFormatter` + # 1.4.2 * Fixes build break for plan9, nacl, solaris # 1.4.1 This new release introduces: * Enhance TextFormatter to not print caller information when they are empty (#944) - * Remove dependency on golang.org/x/crypto (#932, #943) + * Remove dependency on golang.org/x/crypto (#932, #943) Fixes: * Fix Entry.WithContext method to return a copy of the initial entry (#941) @@ -11,7 +34,7 @@ Fixes: # 1.4.0 This new release introduces: * Add `DeferExitHandler`, similar to `RegisterExitHandler` but prepending the handler to the list of handlers (semantically like `defer`) (#848). - * Add `CallerPrettyfier` to `JSONFormatter` and `TextFormatter (#909, #911) + * Add `CallerPrettyfier` to `JSONFormatter` and `TextFormatter` (#909, #911) * Add `Entry.WithContext()` and `Entry.Context`, to set a context on entries to be used e.g. in hooks (#919). Fixes: diff --git a/vendor/github.com/sirupsen/logrus/README.md b/vendor/github.com/sirupsen/logrus/README.md index a4796eb07..5796706db 100644 --- a/vendor/github.com/sirupsen/logrus/README.md +++ b/vendor/github.com/sirupsen/logrus/README.md @@ -1,8 +1,28 @@ -# Logrus :walrus: [![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus) [![GoDoc](https://godoc.org/github.com/sirupsen/logrus?status.svg)](https://godoc.org/github.com/sirupsen/logrus) +# Logrus :walrus: [![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus) [![GoDoc](https://godoc.org/github.com/sirupsen/logrus?status.svg)](https://godoc.org/github.com/sirupsen/logrus) Logrus is a structured logger for Go (golang), completely API compatible with the standard library logger. +**Logrus is in maintenance-mode.** We will not be introducing new features. It's +simply too hard to do in a way that won't break many people's projects, which is +the last thing you want from your Logging library (again...). + +This does not mean Logrus is dead. Logrus will continue to be maintained for +security, (backwards compatible) bug fixes, and performance (where we are +limited by the interface). + +I believe Logrus' biggest contribution is to have played a part in today's +widespread use of structured logging in Golang. There doesn't seem to be a +reason to do a major, breaking iteration into Logrus V2, since the fantastic Go +community has built those independently. Many fantastic alternatives have sprung +up. 
Logrus would look like those, had it been re-designed with what we know +about structured logging in Go today. Check out, for example, +[Zerolog][zerolog], [Zap][zap], and [Apex][apex]. + +[zerolog]: https://github.com/rs/zerolog +[zap]: https://github.com/uber-go/zap +[apex]: https://github.com/apex/log + **Seeing weird case-sensitive problems?** It's in the past been possible to import Logrus as both upper- and lower-case. Due to the Go package environment, this caused issues in the community and we needed a standard. Some environments @@ -15,11 +35,6 @@ comments](https://github.com/sirupsen/logrus/issues/553#issuecomment-306591437). For an in-depth explanation of the casing issue, see [this comment](https://github.com/sirupsen/logrus/issues/570#issuecomment-313933276). -**Are you interested in assisting in maintaining Logrus?** Currently I have a -lot of obligations, and I am unable to provide Logrus with the maintainership it -needs. If you'd like to help, please reach out to me at `simon at author's -username dot com`. - Nicely color-coded in development (when a TTY is attached, otherwise just plain text): @@ -187,7 +202,7 @@ func main() { log.Out = os.Stdout // You could set this to any `io.Writer` such as a file - // file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY, 0666) + // file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666) // if err == nil { // log.Out = file // } else { @@ -272,7 +287,7 @@ func init() { ``` Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). For the detail, please check the [syslog hook README](hooks/syslog/README.md). -A list of currently known of service hook can be found in this wiki [page](https://github.com/sirupsen/logrus/wiki/Hooks) +A list of currently known service hooks can be found in this wiki [page](https://github.com/sirupsen/logrus/wiki/Hooks) #### Level logging @@ -354,6 +369,7 @@ The built-in logging formatters are: [github.com/mattn/go-colorable](https://github.com/mattn/go-colorable). * When colors are enabled, levels are truncated to 4 characters by default. To disable truncation set the `DisableLevelTruncation` field to `true`. + * When outputting to a TTY, it's often helpful to visually scan down a column where all the levels are the same width. Setting the `PadLevelText` field to `true` enables this behavior, by adding padding to the level text. * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter). * `logrus.JSONFormatter`. Logs fields as JSON. * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter). @@ -364,8 +380,10 @@ Third party logging formatters: * [`GELF`](https://github.com/fabienm/go-logrus-formatters). Formats entries so they comply to Graylog's [GELF 1.1 specification](http://docs.graylog.org/en/2.4/pages/gelf.html). * [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events. * [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout. -* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦. +* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the Power of Zalgo. * [`nested-logrus-formatter`](https://github.com/antonfisher/nested-logrus-formatter). Converts logrus fields to a nested structure. 
+* [`powerful-logrus-formatter`](https://github.com/zput/zxcTool). get fileName, log's line number and the latest function's name when print log; Sava log to files. +* [`caption-json-formatter`](https://github.com/nolleh/caption_json_formatter). logrus's message json formatter with human-readable caption added. You can define your formatter by implementing the `Formatter` interface, requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a @@ -430,14 +448,14 @@ entries. It should not be a feature of the application-level logger. | Tool | Description | | ---- | ----------- | -|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus mate is a tool for Logrus to manage loggers, you can initial logger's level, hook and formatter by config file, the logger will generated with different config at different environment.| +|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus mate is a tool for Logrus to manage loggers, you can initial logger's level, hook and formatter by config file, the logger will be generated with different configs in different environments.| |[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|An Helper around Logrus to wrap with spf13/Viper to load configuration with fangs! And to simplify Logrus configuration use some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) | #### Testing Logrus has a built in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides: -* decorators for existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just add the `test` hook +* decorators for existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just adds the `test` hook * a test logger (`test.NewNullLogger`) that just records log messages (and does not output any): ```go @@ -465,7 +483,7 @@ func TestSomething(t*testing.T){ Logrus can register one or more functions that will be called when any `fatal` level message is logged. The registered handlers will be executed before -logrus performs a `os.Exit(1)`. This behavior may be helpful if callers need +logrus performs an `os.Exit(1)`. This behavior may be helpful if callers need to gracefully shutdown. Unlike a `panic("Something went wrong...")` call which can be intercepted with a deferred `recover` a call to `os.Exit(1)` can not be intercepted. ``` @@ -490,6 +508,6 @@ Situation when locking is not needed includes: 1) logger.Out is protected by locks. - 2) logger.Out is a os.File handler opened with `O_APPEND` flag, and every write is smaller than 4k. (This allow multi-thread/multi-process writing) + 2) logger.Out is an os.File handler opened with `O_APPEND` flag, and every write is smaller than 4k. 
(This allows multi-thread/multi-process writing) (Refer to http://www.notthewizard.com/2014/06/17/are-files-appends-really-atomic/) diff --git a/vendor/github.com/sirupsen/logrus/appveyor.yml b/vendor/github.com/sirupsen/logrus/appveyor.yml index 96c2ce15f..df9d65c3a 100644 --- a/vendor/github.com/sirupsen/logrus/appveyor.yml +++ b/vendor/github.com/sirupsen/logrus/appveyor.yml @@ -1,14 +1,14 @@ -version: "{build}" -platform: x64 -clone_folder: c:\gopath\src\github.com\sirupsen\logrus -environment: - GOPATH: c:\gopath -branches: - only: - - master -install: - - set PATH=%GOPATH%\bin;c:\go\bin;%PATH% - - go version -build_script: - - go get -t - - go test +version: "{build}" +platform: x64 +clone_folder: c:\gopath\src\github.com\sirupsen\logrus +environment: + GOPATH: c:\gopath +branches: + only: + - master +install: + - set PATH=%GOPATH%\bin;c:\go\bin;%PATH% + - go version +build_script: + - go get -t + - go test diff --git a/vendor/github.com/sirupsen/logrus/entry.go b/vendor/github.com/sirupsen/logrus/entry.go index 63e25583c..f6e062a34 100644 --- a/vendor/github.com/sirupsen/logrus/entry.go +++ b/vendor/github.com/sirupsen/logrus/entry.go @@ -85,10 +85,15 @@ func NewEntry(logger *Logger) *Entry { } } +// Returns the bytes representation of this entry from the formatter. +func (entry *Entry) Bytes() ([]byte, error) { + return entry.Logger.Formatter.Format(entry) +} + // Returns the string representation from the reader and ultimately the // formatter. func (entry *Entry) String() (string, error) { - serialized, err := entry.Logger.Formatter.Format(entry) + serialized, err := entry.Bytes() if err != nil { return "", err } @@ -103,7 +108,11 @@ func (entry *Entry) WithError(err error) *Entry { // Add a context to the Entry. func (entry *Entry) WithContext(ctx context.Context) *Entry { - return &Entry{Logger: entry.Logger, Data: entry.Data, Time: entry.Time, err: entry.err, Context: ctx} + dataCopy := make(Fields, len(entry.Data)) + for k, v := range entry.Data { + dataCopy[k] = v + } + return &Entry{Logger: entry.Logger, Data: dataCopy, Time: entry.Time, err: entry.err, Context: ctx} } // Add a single field to the Entry. @@ -144,7 +153,11 @@ func (entry *Entry) WithFields(fields Fields) *Entry { // Overrides the time of the Entry. 
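Several of the logrus 1.5/1.6 features named in the CHANGELOG and README hunks above (`ForceQuote` and `PadLevelText` on `TextFormatter`, `DisableHTMLEscape` on `JSONFormatter`, and the `WriterLevel` helper whose doc comment is expanded later in this patch) only appear here as scattered hunks, so a small hedged usage sketch may help. The option names come from the diff itself; the messages and field values are invented.

```go
package main

import (
	stdlog "log"

	"github.com/sirupsen/logrus"
)

func main() {
	logger := logrus.New()

	// TextFormatter options added in this release: pad the level text to a
	// uniform width and force quoting of all field values.
	logger.SetFormatter(&logrus.TextFormatter{
		FullTimestamp: true,
		PadLevelText:  true,
		ForceQuote:    true,
	})
	logger.WithField("component", "etcd-druid").Info("padded, quoted output")

	// JSONFormatter gained DisableHTMLEscape, useful when field values
	// contain URLs with '&' that should not be escaped.
	logger.SetFormatter(&logrus.JSONFormatter{DisableHTMLEscape: true})
	logger.WithField("url", "https://example.com/?a=1&b=2").Info("raw ampersand in JSON")

	// WriterLevel can route the standard library logger through logrus;
	// per its doc comment, the caller is responsible for closing the writer.
	w := logger.WriterLevel(logrus.WarnLevel)
	defer w.Close()
	stdlog.New(w, "", 0).Println("stdlib message emitted at WARN level")
}
```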
func (entry *Entry) WithTime(t time.Time) *Entry { - return &Entry{Logger: entry.Logger, Data: entry.Data, Time: t, err: entry.err, Context: entry.Context} + dataCopy := make(Fields, len(entry.Data)) + for k, v := range entry.Data { + dataCopy[k] = v + } + return &Entry{Logger: entry.Logger, Data: dataCopy, Time: t, err: entry.err, Context: entry.Context} } // getPackageName reduces a fully qualified function name to the package name @@ -165,15 +178,20 @@ func getPackageName(f string) string { // getCaller retrieves the name of the first non-logrus calling function func getCaller() *runtime.Frame { - // cache this package's fully-qualified name callerInitOnce.Do(func() { - pcs := make([]uintptr, 2) + pcs := make([]uintptr, maximumCallerDepth) _ = runtime.Callers(0, pcs) - logrusPackage = getPackageName(runtime.FuncForPC(pcs[1]).Name()) - // now that we have the cache, we can skip a minimum count of known-logrus functions - // XXX this is dubious, the number of frames may vary + // dynamic get the package name and the minimum caller depth + for i := 0; i < maximumCallerDepth; i++ { + funcName := runtime.FuncForPC(pcs[i]).Name() + if strings.Contains(funcName, "getCaller") { + logrusPackage = getPackageName(funcName) + break + } + } + minimumCallerDepth = knownLogrusFrames }) @@ -187,7 +205,7 @@ func getCaller() *runtime.Frame { // If the caller isn't part of this package, we're done if pkg != logrusPackage { - return &f + return &f //nolint:scopelint } } @@ -217,9 +235,11 @@ func (entry Entry) log(level Level, msg string) { entry.Level = level entry.Message = msg + entry.Logger.mu.Lock() if entry.Logger.ReportCaller { entry.Caller = getCaller() } + entry.Logger.mu.Unlock() entry.fireHooks() @@ -255,11 +275,10 @@ func (entry *Entry) write() { serialized, err := entry.Logger.Formatter.Format(entry) if err != nil { fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) - } else { - _, err = entry.Logger.Out.Write(serialized) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) - } + return + } + if _, err = entry.Logger.Out.Write(serialized); err != nil { + fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) } } diff --git a/vendor/github.com/sirupsen/logrus/exported.go b/vendor/github.com/sirupsen/logrus/exported.go index 62fc2f219..42b04f6c8 100644 --- a/vendor/github.com/sirupsen/logrus/exported.go +++ b/vendor/github.com/sirupsen/logrus/exported.go @@ -80,7 +80,7 @@ func WithFields(fields Fields) *Entry { return std.WithFields(fields) } -// WithTime creats an entry from the standard logger and overrides the time of +// WithTime creates an entry from the standard logger and overrides the time of // logs generated with it. 
// // Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal diff --git a/vendor/github.com/sirupsen/logrus/go.mod b/vendor/github.com/sirupsen/logrus/go.mod index 12fdf9898..d41329679 100644 --- a/vendor/github.com/sirupsen/logrus/go.mod +++ b/vendor/github.com/sirupsen/logrus/go.mod @@ -2,9 +2,10 @@ module github.com/sirupsen/logrus require ( github.com/davecgh/go-spew v1.1.1 // indirect - github.com/konsorten/go-windows-terminal-sequences v1.0.1 + github.com/konsorten/go-windows-terminal-sequences v1.0.3 github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/stretchr/objx v0.1.1 // indirect github.com/stretchr/testify v1.2.2 golang.org/x/sys v0.0.0-20190422165155-953cdadca894 ) + +go 1.13 diff --git a/vendor/github.com/sirupsen/logrus/go.sum b/vendor/github.com/sirupsen/logrus/go.sum index 596c318b9..49c690f23 100644 --- a/vendor/github.com/sirupsen/logrus/go.sum +++ b/vendor/github.com/sirupsen/logrus/go.sum @@ -1,16 +1,12 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe h1:CHRGQ8V7OlCYtwaKPJi3iA7J+YdNKdo8j7nG5IgDhjs= -github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33 h1:I6FyU15t786LL7oL/hn43zqTuEGr4PN7F4XJ1p4E3Y8= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190422165155-953cdadca894 h1:Cz4ceDQGXuKRnVBDTS23GTn/pU5OE2C0WrNTOYK1Uuc= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/sirupsen/logrus/json_formatter.go b/vendor/github.com/sirupsen/logrus/json_formatter.go index 098a21a06..ba7f23711 100644 --- a/vendor/github.com/sirupsen/logrus/json_formatter.go +++ b/vendor/github.com/sirupsen/logrus/json_formatter.go @@ -28,6 +28,9 @@ type JSONFormatter struct { // DisableTimestamp allows disabling automatic timestamps in output DisableTimestamp bool + // DisableHTMLEscape allows disabling html escaping in output + DisableHTMLEscape bool + // DataKey allows users to put all the log entry parameters into a nested dictionary at a given key. 
DataKey string @@ -110,6 +113,7 @@ func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { } encoder := json.NewEncoder(b) + encoder.SetEscapeHTML(!f.DisableHTMLEscape) if f.PrettyPrint { encoder.SetIndent("", " ") } diff --git a/vendor/github.com/sirupsen/logrus/logger.go b/vendor/github.com/sirupsen/logrus/logger.go index c0c0b1e55..6fdda748e 100644 --- a/vendor/github.com/sirupsen/logrus/logger.go +++ b/vendor/github.com/sirupsen/logrus/logger.go @@ -68,10 +68,10 @@ func (mw *MutexWrap) Disable() { // `Out` and `Hooks` directly on the default logger instance. You can also just // instantiate your own: // -// var log = &Logger{ +// var log = &logrus.Logger{ // Out: os.Stderr, -// Formatter: new(JSONFormatter), -// Hooks: make(LevelHooks), +// Formatter: new(logrus.JSONFormatter), +// Hooks: make(logrus.LevelHooks), // Level: logrus.DebugLevel, // } // @@ -100,8 +100,9 @@ func (logger *Logger) releaseEntry(entry *Entry) { logger.entryPool.Put(entry) } -// Adds a field to the log entry, note that it doesn't log until you call -// Debug, Print, Info, Warn, Error, Fatal or Panic. It only creates a log entry. +// WithField allocates a new entry and adds a field to it. +// Debug, Print, Info, Warn, Error, Fatal or Panic must be then applied to +// this new returned entry. // If you want multiple fields, use `WithFields`. func (logger *Logger) WithField(key string, value interface{}) *Entry { entry := logger.newEntry() diff --git a/vendor/github.com/sirupsen/logrus/logrus.go b/vendor/github.com/sirupsen/logrus/logrus.go index 8644761f7..2f16224cb 100644 --- a/vendor/github.com/sirupsen/logrus/logrus.go +++ b/vendor/github.com/sirupsen/logrus/logrus.go @@ -51,7 +51,7 @@ func (level *Level) UnmarshalText(text []byte) error { return err } - *level = Level(l) + *level = l return nil } diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go b/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go index 3c4f43f91..499789984 100644 --- a/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go +++ b/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go @@ -1,4 +1,5 @@ // +build darwin dragonfly freebsd netbsd openbsd +// +build !js package logrus @@ -10,4 +11,3 @@ func isTerminal(fd int) bool { _, err := unix.IoctlGetTermios(fd, ioctlReadTermios) return err == nil } - diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_js.go b/vendor/github.com/sirupsen/logrus/terminal_check_js.go new file mode 100644 index 000000000..ebdae3ec6 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_check_js.go @@ -0,0 +1,7 @@ +// +build js + +package logrus + +func isTerminal(fd int) bool { + return false +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_unix.go b/vendor/github.com/sirupsen/logrus/terminal_check_unix.go index 355dc966f..cc4fe6e31 100644 --- a/vendor/github.com/sirupsen/logrus/terminal_check_unix.go +++ b/vendor/github.com/sirupsen/logrus/terminal_check_unix.go @@ -1,4 +1,5 @@ // +build linux aix +// +build !js package logrus @@ -10,4 +11,3 @@ func isTerminal(fd int) bool { _, err := unix.IoctlGetTermios(fd, ioctlReadTermios) return err == nil } - diff --git a/vendor/github.com/sirupsen/logrus/text_formatter.go b/vendor/github.com/sirupsen/logrus/text_formatter.go index e01587c43..3c28b54ca 100644 --- a/vendor/github.com/sirupsen/logrus/text_formatter.go +++ b/vendor/github.com/sirupsen/logrus/text_formatter.go @@ -6,9 +6,11 @@ import ( "os" "runtime" "sort" + "strconv" "strings" "sync" "time" + "unicode/utf8" ) const ( @@ -32,6 +34,14 @@ type 
TextFormatter struct { // Force disabling colors. DisableColors bool + // Force quoting of all values + ForceQuote bool + + // DisableQuote disables quoting for all values. + // DisableQuote will have a lower priority than ForceQuote. + // If both of them are set to true, quote will be forced on all values. + DisableQuote bool + // Override coloring based on CLICOLOR and CLICOLOR_FORCE. - https://bixense.com/clicolors/ EnvironmentOverrideColors bool @@ -57,6 +67,10 @@ type TextFormatter struct { // Disables the truncation of the level text to 4 characters. DisableLevelTruncation bool + // PadLevelText Adds padding the level text so that all the levels output at the same length + // PadLevelText is a superset of the DisableLevelTruncation option + PadLevelText bool + // QuoteEmptyFields will wrap empty fields in quotes if true QuoteEmptyFields bool @@ -79,23 +93,32 @@ type TextFormatter struct { CallerPrettyfier func(*runtime.Frame) (function string, file string) terminalInitOnce sync.Once + + // The max length of the level text, generated dynamically on init + levelTextMaxLength int } func (f *TextFormatter) init(entry *Entry) { if entry.Logger != nil { f.isTerminal = checkIfTerminal(entry.Logger.Out) } + // Get the max length of the level text + for _, level := range AllLevels { + levelTextLength := utf8.RuneCount([]byte(level.String())) + if levelTextLength > f.levelTextMaxLength { + f.levelTextMaxLength = levelTextLength + } + } } func (f *TextFormatter) isColored() bool { isColored := f.ForceColors || (f.isTerminal && (runtime.GOOS != "windows")) if f.EnvironmentOverrideColors { - if force, ok := os.LookupEnv("CLICOLOR_FORCE"); ok && force != "0" { + switch force, ok := os.LookupEnv("CLICOLOR_FORCE"); { + case ok && force != "0": isColored = true - } else if ok && force == "0" { - isColored = false - } else if os.Getenv("CLICOLOR") == "0" { + case ok && force == "0", os.Getenv("CLICOLOR") == "0": isColored = false } } @@ -217,9 +240,18 @@ func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []strin } levelText := strings.ToUpper(entry.Level.String()) - if !f.DisableLevelTruncation { + if !f.DisableLevelTruncation && !f.PadLevelText { levelText = levelText[0:4] } + if f.PadLevelText { + // Generates the format string used in the next line, for example "%-6s" or "%-7s". + // Based on the max level text length. 
+ formatString := "%-" + strconv.Itoa(f.levelTextMaxLength) + "s" + // Formats the level text by appending spaces up to the max length, for example: + // - "INFO " + // - "WARNING" + levelText = fmt.Sprintf(formatString, levelText) + } // Remove a single newline if it already exists in the message to keep // the behavior of logrus text_formatter the same as the stdlib log package @@ -243,11 +275,12 @@ func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []strin } } - if f.DisableTimestamp { + switch { + case f.DisableTimestamp: fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m%s %-44s ", levelColor, levelText, caller, entry.Message) - } else if !f.FullTimestamp { + case !f.FullTimestamp: fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d]%s %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), caller, entry.Message) - } else { + default: fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s]%s %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), caller, entry.Message) } for _, k := range keys { @@ -258,9 +291,15 @@ func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []strin } func (f *TextFormatter) needsQuoting(text string) bool { + if f.ForceQuote { + return true + } if f.QuoteEmptyFields && len(text) == 0 { return true } + if f.DisableQuote { + return false + } for _, ch := range text { if !((ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z') || diff --git a/vendor/github.com/sirupsen/logrus/writer.go b/vendor/github.com/sirupsen/logrus/writer.go index 9e1f75135..72e8e3a1b 100644 --- a/vendor/github.com/sirupsen/logrus/writer.go +++ b/vendor/github.com/sirupsen/logrus/writer.go @@ -6,10 +6,16 @@ import ( "runtime" ) +// Writer at INFO level. See WriterLevel for details. func (logger *Logger) Writer() *io.PipeWriter { return logger.WriterLevel(InfoLevel) } +// WriterLevel returns an io.Writer that can be used to write arbitrary text to +// the logger at the given log level. Each line written to the writer will be +// printed in the usual way using formatters and hooks. The writer is part of an +// io.Pipe and it is the callers responsibility to close the writer when done. +// This can be used to override the standard library logger easily. func (logger *Logger) WriterLevel(level Level) *io.PipeWriter { return NewEntry(logger).WriterLevel(level) } diff --git a/vendor/go.uber.org/atomic/CHANGELOG.md b/vendor/go.uber.org/atomic/CHANGELOG.md index 6db846f10..aef8b6ebc 100644 --- a/vendor/go.uber.org/atomic/CHANGELOG.md +++ b/vendor/go.uber.org/atomic/CHANGELOG.md @@ -4,6 +4,10 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## [1.6.0] - 2020-02-24 +### Changed +- Drop library dependency on `golang.org/x/{lint, tools}`. + ## [1.5.1] - 2019-11-19 - Fix bug where `Bool.CAS` and `Bool.Toggle` do work correctly together causing `CAS` to fail even though the old value matches. @@ -48,6 +52,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Initial release. 
+[1.6.0]: https://github.com/uber-go/atomic/compare/v1.5.1...v1.6.0 [1.5.1]: https://github.com/uber-go/atomic/compare/v1.5.0...v1.5.1 [1.5.0]: https://github.com/uber-go/atomic/compare/v1.4.0...v1.5.0 [1.4.0]: https://github.com/uber-go/atomic/compare/v1.3.2...v1.4.0 diff --git a/vendor/go.uber.org/atomic/tools.go b/vendor/go.uber.org/atomic/tools.go deleted file mode 100644 index 654f5b2fe..000000000 --- a/vendor/go.uber.org/atomic/tools.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (c) 2019 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// +build tools - -package atomic - -import ( - // Tools used during development. - _ "golang.org/x/lint/golint" -) diff --git a/vendor/go.uber.org/multierr/CHANGELOG.md b/vendor/go.uber.org/multierr/CHANGELOG.md index abb355a06..3110c5af0 100644 --- a/vendor/go.uber.org/multierr/CHANGELOG.md +++ b/vendor/go.uber.org/multierr/CHANGELOG.md @@ -1,6 +1,12 @@ Releases ======== +v1.5.0 (2020-02-24) +=================== + +- Drop library dependency on development-time tooling. + + v1.4.0 (2019-11-04) =================== diff --git a/vendor/go.uber.org/multierr/error.go b/vendor/go.uber.org/multierr/error.go index 0ee6fe8bc..04eb9618c 100644 --- a/vendor/go.uber.org/multierr/error.go +++ b/vendor/go.uber.org/multierr/error.go @@ -130,7 +130,7 @@ type errorGroup interface { } // Errors returns a slice containing zero or more errors that the supplied -// error is composed of. If the error is nil, the returned slice is empty. +// error is composed of. If the error is nil, a nil slice is returned. 
// // err := multierr.Append(r.Close(), w.Close()) // errors := multierr.Errors(err) diff --git a/vendor/go.uber.org/multierr/go.mod b/vendor/go.uber.org/multierr/go.mod index 5463fac72..58d5f90bb 100644 --- a/vendor/go.uber.org/multierr/go.mod +++ b/vendor/go.uber.org/multierr/go.mod @@ -4,7 +4,7 @@ go 1.12 require ( github.com/stretchr/testify v1.3.0 - go.uber.org/atomic v1.5.0 + go.uber.org/atomic v1.6.0 go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee golang.org/x/lint v0.0.0-20190930215403-16217165b5de golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5 // indirect diff --git a/vendor/go.uber.org/multierr/go.sum b/vendor/go.uber.org/multierr/go.sum index b460913d2..557fbba28 100644 --- a/vendor/go.uber.org/multierr/go.sum +++ b/vendor/go.uber.org/multierr/go.sum @@ -15,8 +15,8 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -go.uber.org/atomic v1.5.0 h1:OI5t8sDa1Or+q8AeE+yKeB/SDYioSHAgcVljj9JIETY= -go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= diff --git a/vendor/go.uber.org/multierr/tools.go b/vendor/go.uber.org/multierr/tools.go deleted file mode 100644 index df93f0723..000000000 --- a/vendor/go.uber.org/multierr/tools.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright (c) 2019 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// +build tools - -package multierr - -import ( - // Tools we use during development. 
- _ "go.uber.org/tools/update-license" - _ "golang.org/x/lint/golint" - _ "honnef.co/go/tools/cmd/staticcheck" -) diff --git a/vendor/go.uber.org/tools/LICENSE b/vendor/go.uber.org/tools/LICENSE deleted file mode 100644 index 858e02475..000000000 --- a/vendor/go.uber.org/tools/LICENSE +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2017 Uber Technologies, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/go.uber.org/tools/update-license/.gitignore b/vendor/go.uber.org/tools/update-license/.gitignore deleted file mode 100644 index b167772c8..000000000 --- a/vendor/go.uber.org/tools/update-license/.gitignore +++ /dev/null @@ -1 +0,0 @@ -update-license diff --git a/vendor/go.uber.org/tools/update-license/README.md b/vendor/go.uber.org/tools/update-license/README.md deleted file mode 100644 index 5887df1dd..000000000 --- a/vendor/go.uber.org/tools/update-license/README.md +++ /dev/null @@ -1,24 +0,0 @@ -# update-license - -This is a small tool that updates the license header for Uber's open source Golang files. - -## Installation - -``` -go get go.uber.org/tools/update-license -``` - -## Usage - -``` -update-license go_files... -``` - -## Further Work - -* Support more licenses by name (MIT, Apache 2.0, etc), file path, url (http GET) -* Support custom owner (not just "Uber Technologies, Inc.") -* Support more languages than go (cover go, java, js, py to start, along with LICENSE, LICENSE.txt) -* Talk about removing custom logic for header comments (ie `@generated`, `Code generated by`), it probably makes more sense just to put the license at the top -* Better detection support for existing licenses so they can be removed -* Verbose, dry run support diff --git a/vendor/go.uber.org/tools/update-license/licenses.go b/vendor/go.uber.org/tools/update-license/licenses.go deleted file mode 100644 index 76957c21e..000000000 --- a/vendor/go.uber.org/tools/update-license/licenses.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright (c) 2019 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package main - -var licenseTemplates = map[string]string{ - "Apache-2.0": `// Copyright {{.Year}} {{.Owner}} -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License.`, - "MIT": `// Copyright (c) {{.Year}} {{.Owner}} -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE.`, -} diff --git a/vendor/go.uber.org/tools/update-license/main.go b/vendor/go.uber.org/tools/update-license/main.go deleted file mode 100644 index 269fd9b47..000000000 --- a/vendor/go.uber.org/tools/update-license/main.go +++ /dev/null @@ -1,228 +0,0 @@ -// Copyright (c) 2019 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package main - -import ( - "bytes" - "flag" - "fmt" - "html/template" - "io/ioutil" - "log" - "os" - "path/filepath" - "sort" - "strings" - "time" -) - -const ( - // how many lines to check for an existing copyright - // this logic is not great and we should probably do something else - // but this was copied from the python script - copyrightLineLimit = 5 - headerPrefix = "// Copyright" -) - -var ( - flagDryRun = flag.Bool("dry", false, "Do not edit files and just print out what files would be edited") - flagOwner = flag.String("owner", "Uber Technologies, Inc.", "Copyright owner") - flagLicense = flag.String( - "license", - "MIT", - fmt.Sprintf( - "Type of license to use [%s]", - strings.Join(validLicenses(), ", "), - ), - ) - - lineSkipPrefixes = []string{ - "// Code generated by", - "// @generated", - } -) - -func main() { - log.SetFlags(0) - log.SetOutput(os.Stdout) - log.SetPrefix("") - if err := do(); err != nil { - log.Fatal(err) - } -} - -func do() error { - flag.Parse() - - if len(flag.Args()) < 1 { - return fmt.Errorf("usage: %s GO_FILES", os.Args[0]) - } - - return updateFiles( - flag.Args(), - time.Now().UTC().Year(), - *flagLicense, - *flagOwner, - *flagDryRun, - ) -} - -func fullLicense(ts string, year int, owner string) string { - var buf bytes.Buffer - t, err := template.New("").Parse(ts) - if err != nil { - log.Panic("failed to parse license template", err) - } - - data := struct { - Year int - Owner string - }{year, owner} - if err := t.Execute(&buf, data); err != nil { - log.Panic("failed to execture license template", err) - } - - return strings.TrimSpace(buf.String()) -} - -// validLicenses grabs all the license templates from the folder -func validLicenses() []string { - res := make([]string, 0, len(licenseTemplates)) - - for k := range licenseTemplates { - res = append(res, k) - } - - sort.Strings(res) - return res -} - -func updateFiles( - filePaths []string, - year int, - license string, - owner string, - dryRun bool, -) error { - if err := checkFilePaths(filePaths); err != nil { - return err - } - for _, filePath := range filePaths { - if err := updateFile(filePath, year, license, owner, dryRun); err != nil { - return err - } - } - return nil -} - -func checkFilePaths(filePaths []string) error { - for _, filePath := range filePaths { - if filepath.Ext(filePath) != ".go" { - return fmt.Errorf("%s is not a go file", filePath) - } - } - return nil -} - -func updateFile( - filePath string, - 
year int, - license string, - owner string, - dryRun bool, -) error { - data, err := ioutil.ReadFile(filePath) - if err != nil { - return err - } - newData := updateData(data, year, license, owner) - if !bytes.Equal(data, newData) { - if dryRun { - log.Print(filePath) - return nil - } - // we could do something more complicated so that we do not - // need to pass 0644 as the file mode, but in this case it should - // never actually be used to create a file since we know the file - // already exists, and it's easier to use the ReadFile/WriteFile - // logic as it is right now, and since this is just a generation - // program, this should be acceptable - return ioutil.WriteFile(filePath, newData, 0644) - } - return nil -} - -func updateData( - data []byte, - year int, - license string, - owner string, -) []byte { - licenseText := fullLicense(string(licenseTemplates[license]), year, owner) - - return []byte( - strings.Join( - updateLines(strings.Split(string(data), "\n"), licenseText), - "\n", - ), - ) -} - -// a value in the returned slice may contain newlines itself -func updateLines(lines []string, license string) []string { - for i, line := range lines { - if i >= copyrightLineLimit { - break - } - if strings.HasPrefix(line, headerPrefix) { - // assume that the new license text always starts with the copyright - // string. Pretty safe to assume, right? RIGHT? - lines[i] = strings.Split(license, "\n")[0] - return lines - } - } - return addToLines(lines, license) -} - -// a value in the returned slice may contain newlines itself -func addToLines(lines []string, license string) []string { - i := 0 - for len(lines) > i && lineContainsSkipPrefix(lines[i]) { - i++ - // skip comments under the generated line too - for strings.HasPrefix(lines[i], "//") { - i++ - } - } - if i == 0 { - return append([]string{license, ""}, lines...) - } - return append(lines[0:i], append([]string{"", license}, lines[i:]...)...) -} - -func lineContainsSkipPrefix(line string) bool { - for _, skipPrefix := range lineSkipPrefixes { - if strings.HasPrefix(line, skipPrefix) { - return true - } - } - return false -} diff --git a/vendor/go.uber.org/zap/.travis.yml b/vendor/go.uber.org/zap/.travis.yml index 647b4ee43..cfdc69f41 100644 --- a/vendor/go.uber.org/zap/.travis.yml +++ b/vendor/go.uber.org/zap/.travis.yml @@ -9,8 +9,8 @@ env: matrix: include: - - go: 1.12.x - go: 1.13.x + - go: 1.14.x env: LINT=1 script: diff --git a/vendor/go.uber.org/zap/CHANGELOG.md b/vendor/go.uber.org/zap/CHANGELOG.md index bebdb748d..aeff90e4e 100644 --- a/vendor/go.uber.org/zap/CHANGELOG.md +++ b/vendor/go.uber.org/zap/CHANGELOG.md @@ -1,5 +1,43 @@ # Changelog +## 1.15.0 (23 Apr 2020) + +Bugfixes: +* [#804][]: Fix handling of `Time` values out of `UnixNano` range. +* [#812][]: Fix `IncreaseLevel` being reset after a call to `With`. + +Enhancements: +* [#806][]: Add `WithCaller` option to supersede the `AddCaller` option. This + allows disabling annotation of log entries with caller information if + previously enabled with `AddCaller`. +* [#813][]: Deprecate `NewSampler` constructor in favor of + `NewSamplerWithOptions` which supports a `SamplerHook` option. This option + adds support for monitoring sampling decisions through a hook. + +Thanks to @danielbprice for their contributions to this release. + +## 1.14.1 (14 Mar 2020) + +Bugfixes: +* [#791][]: Fix panic on attempting to build a logger with an invalid Config. +* [#795][]: Vendoring Zap with `go mod vendor` no longer includes Zap's + development-time dependencies. 
+* [#799][]: Fix issue introduced in 1.14.0 that caused invalid JSON output to + be generated for arrays of `time.Time` objects when using string-based time + formats. + +Thanks to @YashishDua for their contributions to this release. + +## 1.14.0 (20 Feb 2020) + +Enhancements: +* [#771][]: Optimize calls for disabled log levels. +* [#773][]: Add millisecond duration encoder. +* [#775][]: Add option to increase the level of a logger. +* [#786][]: Optimize time formatters using `Time.AppendFormat` where possible. + +Thanks to @caibirdme for their contributions to this release. + ## 1.13.0 (13 Nov 2019) Enhancements: @@ -350,3 +388,14 @@ upgrade to the upcoming stable release. [#736]: https://github.com/uber-go/zap/pull/736 [#751]: https://github.com/uber-go/zap/pull/751 [#758]: https://github.com/uber-go/zap/pull/758 +[#771]: https://github.com/uber-go/zap/pull/771 +[#773]: https://github.com/uber-go/zap/pull/773 +[#775]: https://github.com/uber-go/zap/pull/775 +[#786]: https://github.com/uber-go/zap/pull/786 +[#791]: https://github.com/uber-go/zap/pull/791 +[#795]: https://github.com/uber-go/zap/pull/795 +[#799]: https://github.com/uber-go/zap/pull/799 +[#804]: https://github.com/uber-go/zap/pull/804 +[#812]: https://github.com/uber-go/zap/pull/812 +[#806]: https://github.com/uber-go/zap/pull/806 +[#813]: https://github.com/uber-go/zap/pull/813 diff --git a/vendor/go.uber.org/zap/Makefile b/vendor/go.uber.org/zap/Makefile index 21e436c45..dfaf6406e 100644 --- a/vendor/go.uber.org/zap/Makefile +++ b/vendor/go.uber.org/zap/Makefile @@ -1,6 +1,7 @@ export GOBIN ?= $(shell pwd)/bin GOLINT = $(GOBIN)/golint +STATICCHECK = $(GOBIN)/staticcheck BENCH_FLAGS ?= -cpuprofile=cpu.pprof -memprofile=mem.pprof -benchmem # Directories containing independent Go modules. @@ -17,7 +18,7 @@ GO_FILES := $(shell \ all: lint test .PHONY: lint -lint: $(GOLINT) +lint: $(GOLINT) $(STATICCHECK) @rm -rf lint.log @echo "Checking formatting..." @gofmt -d -s $(GO_FILES) 2>&1 | tee lint.log @@ -25,6 +26,8 @@ lint: $(GOLINT) @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && go vet ./... 2>&1) &&) true | tee -a lint.log @echo "Checking lint..." @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && $(GOLINT) ./... 2>&1) &&) true | tee -a lint.log + @echo "Checking staticcheck..." + @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && $(STATICCHECK) ./... 2>&1) &&) true | tee -a lint.log @echo "Checking for unresolved FIXMEs..." @git grep -i fixme | grep -v -e Makefile | tee -a lint.log @echo "Checking for license headers..." @@ -34,6 +37,9 @@ lint: $(GOLINT) $(GOLINT): go install golang.org/x/lint/golint +$(STATICCHECK): + go install honnef.co/go/tools/cmd/staticcheck + .PHONY: test test: @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && go test -race ./...) &&) true diff --git a/vendor/go.uber.org/zap/buffer/buffer.go b/vendor/go.uber.org/zap/buffer/buffer.go index 7592e8c63..3f4b86e08 100644 --- a/vendor/go.uber.org/zap/buffer/buffer.go +++ b/vendor/go.uber.org/zap/buffer/buffer.go @@ -23,7 +23,10 @@ // package's zero-allocation formatters. package buffer // import "go.uber.org/zap/buffer" -import "strconv" +import ( + "strconv" + "time" +) const _size = 1024 // by default, create 1 KiB buffers @@ -49,6 +52,11 @@ func (b *Buffer) AppendInt(i int64) { b.bs = strconv.AppendInt(b.bs, i, 10) } +// AppendTime appends the time formatted using the specified layout. +func (b *Buffer) AppendTime(t time.Time, layout string) { + b.bs = t.AppendFormat(b.bs, layout) +} + // AppendUint appends an unsigned integer to the underlying buffer (assuming // base 10). 
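For illustration only (not part of the vendored diff): a minimal sketch of the new Buffer.AppendTime helper added above, assuming the pooled buffer API from go.uber.org/zap/buffer in this vendored version.

package main

import (
	"fmt"
	"time"

	"go.uber.org/zap/buffer"
)

func main() {
	pool := buffer.NewPool()
	buf := pool.Get()
	defer buf.Free()

	// AppendTime writes the formatted timestamp straight into the pooled
	// buffer via time.Time.AppendFormat, avoiding an intermediate string.
	buf.AppendTime(time.Date(2021, 2, 25, 12, 27, 48, 0, time.UTC), time.RFC3339)
	fmt.Println(buf.String()) // 2021-02-25T12:27:48Z
}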
func (b *Buffer) AppendUint(i uint64) { diff --git a/vendor/go.uber.org/zap/config.go b/vendor/go.uber.org/zap/config.go index 6fe17d9e0..192fd1a94 100644 --- a/vendor/go.uber.org/zap/config.go +++ b/vendor/go.uber.org/zap/config.go @@ -21,6 +21,7 @@ package zap import ( + "fmt" "sort" "time" @@ -31,10 +32,14 @@ import ( // global CPU and I/O load that logging puts on your process while attempting // to preserve a representative subset of your logs. // -// Values configured here are per-second. See zapcore.NewSampler for details. +// If specified, the Sampler will invoke the Hook after each decision. +// +// Values configured here are per-second. See zapcore.NewSamplerWithOptions for +// details. type SamplingConfig struct { - Initial int `json:"initial" yaml:"initial"` - Thereafter int `json:"thereafter" yaml:"thereafter"` + Initial int `json:"initial" yaml:"initial"` + Thereafter int `json:"thereafter" yaml:"thereafter"` + Hook func(zapcore.Entry, zapcore.SamplingDecision) `json:"-" yaml:"-"` } // Config offers a declarative way to construct a logger. It doesn't do @@ -174,6 +179,10 @@ func (cfg Config) Build(opts ...Option) (*Logger, error) { return nil, err } + if cfg.Level == (AtomicLevel{}) { + return nil, fmt.Errorf("missing Level") + } + log := New( zapcore.NewCore(enc, sink, cfg.Level), cfg.buildOptions(errSink)..., @@ -203,9 +212,19 @@ func (cfg Config) buildOptions(errSink zapcore.WriteSyncer) []Option { opts = append(opts, AddStacktrace(stackLevel)) } - if cfg.Sampling != nil { + if scfg := cfg.Sampling; scfg != nil { opts = append(opts, WrapCore(func(core zapcore.Core) zapcore.Core { - return zapcore.NewSampler(core, time.Second, int(cfg.Sampling.Initial), int(cfg.Sampling.Thereafter)) + var samplerOpts []zapcore.SamplerOption + if scfg.Hook != nil { + samplerOpts = append(samplerOpts, zapcore.SamplerHook(scfg.Hook)) + } + return zapcore.NewSamplerWithOptions( + core, + time.Second, + cfg.Sampling.Initial, + cfg.Sampling.Thereafter, + samplerOpts..., + ) })) } diff --git a/vendor/go.uber.org/zap/encoder.go b/vendor/go.uber.org/zap/encoder.go index 2e9d3c341..08ed83354 100644 --- a/vendor/go.uber.org/zap/encoder.go +++ b/vendor/go.uber.org/zap/encoder.go @@ -62,6 +62,10 @@ func RegisterEncoder(name string, constructor func(zapcore.EncoderConfig) (zapco } func newEncoder(name string, encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) { + if encoderConfig.TimeKey != "" && encoderConfig.EncodeTime == nil { + return nil, fmt.Errorf("missing EncodeTime in EncoderConfig") + } + _encoderMutex.RLock() defer _encoderMutex.RUnlock() if name == "" { diff --git a/vendor/go.uber.org/zap/field.go b/vendor/go.uber.org/zap/field.go index 83c1ea245..dd558fc23 100644 --- a/vendor/go.uber.org/zap/field.go +++ b/vendor/go.uber.org/zap/field.go @@ -32,6 +32,11 @@ import ( // improves the navigability of this package's API documentation. type Field = zapcore.Field +var ( + _minTimeInt64 = time.Unix(0, math.MinInt64) + _maxTimeInt64 = time.Unix(0, math.MaxInt64) +) + // Skip constructs a no-op field, which is often useful when handling invalid // inputs in other Field constructors. func Skip() Field { @@ -339,6 +344,9 @@ func Stringer(key string, val fmt.Stringer) Field { // Time constructs a Field with the given key and value. The encoder // controls how the time is serialized. 
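As a hedged illustration of the config.go hunk above (not part of the diff): Config.Build now rejects a zero AtomicLevel, and SamplingConfig gains a Hook that is forwarded to the sampler. A minimal sketch, assuming the vendored zap v1.15.0 API:

package main

import (
	"log"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	cfg := zap.NewProductionConfig()

	// Build now returns "missing Level" when the AtomicLevel is left zero;
	// NewProductionConfig already sets one, shown explicitly here for clarity.
	cfg.Level = zap.NewAtomicLevelAt(zap.InfoLevel)

	// The new Hook field is wired through buildOptions as a SamplerHook.
	cfg.Sampling.Hook = func(e zapcore.Entry, d zapcore.SamplingDecision) {
		if d&zapcore.LogDropped > 0 {
			log.Printf("sampler dropped: %s", e.Message)
		}
	}

	logger, err := cfg.Build()
	if err != nil {
		log.Fatal(err)
	}
	defer logger.Sync()
	logger.Info("sampling hook installed")
}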
func Time(key string, val time.Time) Field { + if val.Before(_minTimeInt64) || val.After(_maxTimeInt64) { + return Field{Key: key, Type: zapcore.TimeFullType, Interface: val} + } return Field{Key: key, Type: zapcore.TimeType, Integer: val.UnixNano(), Interface: val.Location()} } diff --git a/vendor/go.uber.org/zap/go.mod b/vendor/go.uber.org/zap/go.mod index 1fb6bba0b..118abda15 100644 --- a/vendor/go.uber.org/zap/go.mod +++ b/vendor/go.uber.org/zap/go.mod @@ -5,7 +5,8 @@ go 1.13 require ( github.com/pkg/errors v0.8.1 github.com/stretchr/testify v1.4.0 - go.uber.org/atomic v1.5.0 - go.uber.org/multierr v1.3.0 + go.uber.org/atomic v1.6.0 + go.uber.org/multierr v1.5.0 golang.org/x/lint v0.0.0-20190930215403-16217165b5de + honnef.co/go/tools v0.0.1-2019.2.3 ) diff --git a/vendor/go.uber.org/zap/go.sum b/vendor/go.uber.org/zap/go.sum index 9ff6735d8..99cdb93ea 100644 --- a/vendor/go.uber.org/zap/go.sum +++ b/vendor/go.uber.org/zap/go.sum @@ -20,10 +20,10 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -go.uber.org/atomic v1.5.0 h1:OI5t8sDa1Or+q8AeE+yKeB/SDYioSHAgcVljj9JIETY= -go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/multierr v1.3.0 h1:sFPn2GLc3poCkfrpIXGhBD2X0CMIo4Q/zSULXrj/+uc= -go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= diff --git a/vendor/go.uber.org/zap/logger.go b/vendor/go.uber.org/zap/logger.go index dc8f6e3a4..cd6e19551 100644 --- a/vendor/go.uber.org/zap/logger.go +++ b/vendor/go.uber.org/zap/logger.go @@ -258,6 +258,12 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { // (e.g., Check, Info, Fatal). const callerSkipOffset = 2 + // Check the level first to reduce the cost of disabled log calls. + // Since Panic and higher may exit, we skip the optimization for those levels. + if lvl < zapcore.DPanicLevel && !log.core.Enabled(lvl) { + return nil + } + // Create basic checked entry thru the core; this will be non-nil if the // log message will actually be written somewhere. ent := zapcore.Entry{ diff --git a/vendor/go.uber.org/zap/options.go b/vendor/go.uber.org/zap/options.go index 7a6b0fca1..59f1b54a0 100644 --- a/vendor/go.uber.org/zap/options.go +++ b/vendor/go.uber.org/zap/options.go @@ -20,7 +20,11 @@ package zap -import "go.uber.org/zap/zapcore" +import ( + "fmt" + + "go.uber.org/zap/zapcore" +) // An Option configures a Logger. type Option interface { @@ -83,10 +87,17 @@ func Development() Option { } // AddCaller configures the Logger to annotate each message with the filename -// and line number of zap's caller. 
+// and line number of zap's caller. See also WithCaller. func AddCaller() Option { + return WithCaller(true) +} + +// WithCaller configures the Logger to annotate each message with the filename +// and line number of zap's caller, or not, depending on the value of enabled. +// This is a generalized form of AddCaller. +func WithCaller(enabled bool) Option { return optionFunc(func(log *Logger) { - log.addCaller = true + log.addCaller = enabled }) } @@ -107,3 +118,16 @@ func AddStacktrace(lvl zapcore.LevelEnabler) Option { log.addStack = lvl }) } + +// IncreaseLevel increase the level of the logger. It has no effect if +// the passed in level tries to decrease the level of the logger. +func IncreaseLevel(lvl zapcore.LevelEnabler) Option { + return optionFunc(func(log *Logger) { + core, err := zapcore.NewIncreaseLevelCore(log.core, lvl) + if err != nil { + fmt.Fprintf(log.errorOutput, "failed to IncreaseLevel: %v", err) + } else { + log.core = core + } + }) +} diff --git a/vendor/go.uber.org/zap/tools.go b/vendor/go.uber.org/zap/tools.go deleted file mode 100644 index 2b6366bea..000000000 --- a/vendor/go.uber.org/zap/tools.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (c) 2019 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// +build tools - -package zap - -import ( - // Tools we use during development. - _ "golang.org/x/lint/golint" -) diff --git a/vendor/go.uber.org/zap/zapcore/encoder.go b/vendor/go.uber.org/zap/zapcore/encoder.go index 5e0a69be5..6c78f7e49 100644 --- a/vendor/go.uber.org/zap/zapcore/encoder.go +++ b/vendor/go.uber.org/zap/zapcore/encoder.go @@ -112,21 +112,43 @@ func EpochNanosTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) { enc.AppendInt64(t.UnixNano()) } +func encodeTimeLayout(t time.Time, layout string, enc PrimitiveArrayEncoder) { + type appendTimeEncoder interface { + AppendTimeLayout(time.Time, string) + } + + if enc, ok := enc.(appendTimeEncoder); ok { + enc.AppendTimeLayout(t, layout) + return + } + + enc.AppendString(t.Format(layout)) +} + // ISO8601TimeEncoder serializes a time.Time to an ISO8601-formatted string // with millisecond precision. +// +// If enc supports AppendTimeLayout(t time.Time,layout string), it's used +// instead of appending a pre-formatted string value. 
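For orientation (illustrative only, not part of the diff): with the encodeTimeLayout change above, layout-based time encoders such as ISO8601TimeEncoder use the JSON encoder's new AppendTimeLayout fast path instead of formatting an intermediate string. A minimal sketch assuming the vendored zap v1.15.0 API:

package main

import (
	"os"
	"time"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	encCfg := zap.NewProductionEncoderConfig()
	// ISO8601TimeEncoder now hands its layout to the encoder's
	// AppendTimeLayout fast path when the encoder supports it.
	encCfg.EncodeTime = zapcore.ISO8601TimeEncoder

	core := zapcore.NewCore(zapcore.NewJSONEncoder(encCfg), zapcore.Lock(os.Stdout), zapcore.InfoLevel)
	logger := zap.New(core)

	// Both the entry timestamp and explicit time fields go through the
	// configured time encoder.
	logger.Info("deadline set", zap.Time("deadline", time.Now().Add(30*time.Second)))
}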
func ISO8601TimeEncoder(t time.Time, enc PrimitiveArrayEncoder) { - enc.AppendString(t.Format("2006-01-02T15:04:05.000Z0700")) + encodeTimeLayout(t, "2006-01-02T15:04:05.000Z0700", enc) } // RFC3339TimeEncoder serializes a time.Time to an RFC3339-formatted string. +// +// If enc supports AppendTimeLayout(t time.Time,layout string), it's used +// instead of appending a pre-formatted string value. func RFC3339TimeEncoder(t time.Time, enc PrimitiveArrayEncoder) { - enc.AppendString(t.Format(time.RFC3339)) + encodeTimeLayout(t, time.RFC3339, enc) } // RFC3339NanoTimeEncoder serializes a time.Time to an RFC3339-formatted string // with nanosecond precision. +// +// If enc supports AppendTimeLayout(t time.Time,layout string), it's used +// instead of appending a pre-formatted string value. func RFC3339NanoTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) { - enc.AppendString(t.Format(time.RFC3339Nano)) + encodeTimeLayout(t, time.RFC3339Nano, enc) } // UnmarshalText unmarshals text to a TimeEncoder. @@ -168,6 +190,12 @@ func NanosDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) { enc.AppendInt64(int64(d)) } +// MillisDurationEncoder serializes a time.Duration to an integer number of +// milliseconds elapsed. +func MillisDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) { + enc.AppendInt64(d.Nanoseconds() / 1e6) +} + // StringDurationEncoder serializes a time.Duration using its built-in String // method. func StringDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) { @@ -183,6 +211,8 @@ func (e *DurationEncoder) UnmarshalText(text []byte) error { *e = StringDurationEncoder case "nanos": *e = NanosDurationEncoder + case "ms": + *e = MillisDurationEncoder default: *e = SecondsDurationEncoder } diff --git a/vendor/go.uber.org/zap/zapcore/error.go b/vendor/go.uber.org/zap/zapcore/error.go index a67c7bacc..9ba2272c3 100644 --- a/vendor/go.uber.org/zap/zapcore/error.go +++ b/vendor/go.uber.org/zap/zapcore/error.go @@ -66,11 +66,6 @@ type errorGroup interface { Errors() []error } -type causer interface { - // Provides access to the error that caused this error. - Cause() error -} - // Note that errArry and errArrayElem are very similar to the version // implemented in the top-level error.go file. We can't re-use this because // that would require exporting errArray as part of the zapcore API. diff --git a/vendor/go.uber.org/zap/zapcore/field.go b/vendor/go.uber.org/zap/zapcore/field.go index ae772e4a1..6e05f831f 100644 --- a/vendor/go.uber.org/zap/zapcore/field.go +++ b/vendor/go.uber.org/zap/zapcore/field.go @@ -65,8 +65,11 @@ const ( Int8Type // StringType indicates that the field carries a string. StringType - // TimeType indicates that the field carries a time.Time. + // TimeType indicates that the field carries a time.Time that is + // representable by a UnixNano() stored as an int64. TimeType + // TimeFullType indicates that the field carries a time.Time stored as-is. + TimeFullType // Uint64Type indicates that the field carries a uint64. Uint64Type // Uint32Type indicates that the field carries a uint32. @@ -145,6 +148,8 @@ func (f Field) AddTo(enc ObjectEncoder) { // Fall back to UTC if location is nil. 
enc.AddTime(f.Key, time.Unix(0, f.Integer)) } + case TimeFullType: + enc.AddTime(f.Key, f.Interface.(time.Time)) case Uint64Type: enc.AddUint64(f.Key, uint64(f.Integer)) case Uint32Type: diff --git a/vendor/go.uber.org/zap/zapcore/increase_level.go b/vendor/go.uber.org/zap/zapcore/increase_level.go new file mode 100644 index 000000000..5a1749261 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/increase_level.go @@ -0,0 +1,66 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import "fmt" + +type levelFilterCore struct { + core Core + level LevelEnabler +} + +// NewIncreaseLevelCore creates a core that can be used to increase the level of +// an existing Core. It cannot be used to decrease the logging level, as it acts +// as a filter before calling the underlying core. If level decreases the log level, +// an error is returned. 
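A minimal sketch (not part of the diff) of the NewIncreaseLevelCore constructor documented above; the zap.IncreaseLevel option added in options.go is the Logger-level wrapper around the same core. Assumes the vendored zap v1.15.0 API:

package main

import (
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	// A debug-enabled base core writing JSON to stdout.
	base := zapcore.NewCore(
		zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
		zapcore.Lock(os.Stdout),
		zapcore.DebugLevel,
	)

	// Raise the effective level to Warn; asking for a level the base core
	// does not already enable would return an error instead.
	core, err := zapcore.NewIncreaseLevelCore(base, zapcore.WarnLevel)
	if err != nil {
		panic(err)
	}

	logger := zap.New(core)
	logger.Info("filtered out by the raised level")
	logger.Warn("still written")
}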
+func NewIncreaseLevelCore(core Core, level LevelEnabler) (Core, error) { + for l := _maxLevel; l >= _minLevel; l-- { + if !core.Enabled(l) && level.Enabled(l) { + return nil, fmt.Errorf("invalid increase level, as level %q is allowed by increased level, but not by existing core", l) + } + } + + return &levelFilterCore{core, level}, nil +} + +func (c *levelFilterCore) Enabled(lvl Level) bool { + return c.level.Enabled(lvl) +} + +func (c *levelFilterCore) With(fields []Field) Core { + return &levelFilterCore{c.core.With(fields), c.level} +} + +func (c *levelFilterCore) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { + if !c.Enabled(ent.Level) { + return ce + } + + return c.core.Check(ent, ce) +} + +func (c *levelFilterCore) Write(ent Entry, fields []Field) error { + return c.core.Write(ent, fields) +} + +func (c *levelFilterCore) Sync() error { + return c.core.Sync() +} diff --git a/vendor/go.uber.org/zap/zapcore/json_encoder.go b/vendor/go.uber.org/zap/zapcore/json_encoder.go index 56256be8d..7facc1b36 100644 --- a/vendor/go.uber.org/zap/zapcore/json_encoder.go +++ b/vendor/go.uber.org/zap/zapcore/json_encoder.go @@ -266,6 +266,13 @@ func (enc *jsonEncoder) AppendString(val string) { enc.buf.AppendByte('"') } +func (enc *jsonEncoder) AppendTimeLayout(time time.Time, layout string) { + enc.addElementSeparator() + enc.buf.AppendByte('"') + enc.buf.AppendTime(time, layout) + enc.buf.AppendByte('"') +} + func (enc *jsonEncoder) AppendTime(val time.Time) { cur := enc.buf.Len() enc.EncodeTime(val, enc) diff --git a/vendor/go.uber.org/zap/zapcore/sampler.go b/vendor/go.uber.org/zap/zapcore/sampler.go index e31641863..25f10ca1d 100644 --- a/vendor/go.uber.org/zap/zapcore/sampler.go +++ b/vendor/go.uber.org/zap/zapcore/sampler.go @@ -81,33 +81,104 @@ func (c *counter) IncCheckReset(t time.Time, tick time.Duration) uint64 { return 1 } -type sampler struct { - Core +// SamplingDecision is a decision represented as a bit field made by sampler. +// More decisions may be added in the future. +type SamplingDecision uint32 - counts *counters - tick time.Duration - first, thereafter uint64 +const ( + // LogDropped indicates that the Sampler dropped a log entry. + LogDropped SamplingDecision = 1 << iota + // LogSampled indicates that the Sampler sampled a log entry. + LogSampled +) + +// optionFunc wraps a func so it satisfies the SamplerOption interface. +type optionFunc func(*sampler) + +func (f optionFunc) apply(s *sampler) { + f(s) +} + +// SamplerOption configures a Sampler. +type SamplerOption interface { + apply(*sampler) } -// NewSampler creates a Core that samples incoming entries, which caps the CPU -// and I/O load of logging while attempting to preserve a representative subset -// of your logs. +// nopSamplingHook is the default hook used by sampler. +func nopSamplingHook(Entry, SamplingDecision) {} + +// SamplerHook registers a function which will be called when Sampler makes a +// decision. +// +// This hook may be used to get visibility into the performance of the sampler. +// For example, use it to track metrics of dropped versus sampled logs. 
+// +// var dropped atomic.Int64 +// zapcore.SamplerHook(func(ent zapcore.Entry, dec zapcore.SamplingDecision) { +// if dec&zapcore.LogDropped > 0 { +// dropped.Inc() +// } +// }) +func SamplerHook(hook func(entry Entry, dec SamplingDecision)) SamplerOption { + return optionFunc(func(s *sampler) { + s.hook = hook + }) +} + +// NewSamplerWithOptions creates a Core that samples incoming entries, which +// caps the CPU and I/O load of logging while attempting to preserve a +// representative subset of your logs. // // Zap samples by logging the first N entries with a given level and message // each tick. If more Entries with the same level and message are seen during // the same interval, every Mth message is logged and the rest are dropped. // +// Sampler can be configured to report sampling decisions with the SamplerHook +// option. +// // Keep in mind that zap's sampling implementation is optimized for speed over // absolute precision; under load, each tick may be slightly over- or // under-sampled. -func NewSampler(core Core, tick time.Duration, first, thereafter int) Core { - return &sampler{ +func NewSamplerWithOptions(core Core, tick time.Duration, first, thereafter int, opts ...SamplerOption) Core { + s := &sampler{ Core: core, tick: tick, counts: newCounters(), first: uint64(first), thereafter: uint64(thereafter), + hook: nopSamplingHook, } + for _, opt := range opts { + opt.apply(s) + } + + return s +} + +type sampler struct { + Core + + counts *counters + tick time.Duration + first, thereafter uint64 + hook func(Entry, SamplingDecision) +} + +// NewSampler creates a Core that samples incoming entries, which +// caps the CPU and I/O load of logging while attempting to preserve a +// representative subset of your logs. +// +// Zap samples by logging the first N entries with a given level and message +// each tick. If more Entries with the same level and message are seen during +// the same interval, every Mth message is logged and the rest are dropped. +// +// Keep in mind that zap's sampling implementation is optimized for speed over +// absolute precision; under load, each tick may be slightly over- or +// under-sampled. +// +// Deprecated: use NewSamplerWithOptions. 
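To make the sampler wiring above concrete, a hedged sketch (not part of the diff) of NewSamplerWithOptions with a SamplerHook, assuming the vendored zap v1.15.0 API:

package main

import (
	"fmt"
	"os"
	"time"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	base := zapcore.NewCore(
		zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
		zapcore.Lock(os.Stdout),
		zapcore.InfoLevel,
	)

	// Keep the first 5 identical messages per second, then every 100th; the
	// hook observes each decision. A real hook should use an atomic counter,
	// since cores may be used from multiple goroutines.
	var dropped int
	sampled := zapcore.NewSamplerWithOptions(base, time.Second, 5, 100,
		zapcore.SamplerHook(func(_ zapcore.Entry, dec zapcore.SamplingDecision) {
			if dec&zapcore.LogDropped > 0 {
				dropped++
			}
		}),
	)

	logger := zap.New(sampled)
	for i := 0; i < 1000; i++ {
		logger.Info("hot loop message")
	}
	_ = logger.Sync()
	fmt.Println("entries dropped by the sampler:", dropped)
}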
+func NewSampler(core Core, tick time.Duration, first, thereafter int) Core { + return NewSamplerWithOptions(core, tick, first, thereafter) } func (s *sampler) With(fields []Field) Core { @@ -117,6 +188,7 @@ func (s *sampler) With(fields []Field) Core { counts: s.counts, first: s.first, thereafter: s.thereafter, + hook: s.hook, } } @@ -128,7 +200,9 @@ func (s *sampler) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { counter := s.counts.get(ent.Level, ent.Message) n := counter.IncCheckReset(ent.Time, s.tick) if n > s.first && (n-s.first)%s.thereafter != 0 { + s.hook(ent, LogDropped) return ce } + s.hook(ent, LogSampled) return s.Core.Check(ent, ce) } diff --git a/vendor/golang.org/x/crypto/ssh/terminal/terminal.go b/vendor/golang.org/x/crypto/ssh/terminal/terminal.go index d1b4fca3a..2ffb97bfb 100644 --- a/vendor/golang.org/x/crypto/ssh/terminal/terminal.go +++ b/vendor/golang.org/x/crypto/ssh/terminal/terminal.go @@ -113,6 +113,7 @@ func NewTerminal(c io.ReadWriter, prompt string) *Terminal { } const ( + keyCtrlC = 3 keyCtrlD = 4 keyCtrlU = 21 keyEnter = '\r' @@ -151,8 +152,12 @@ func bytesToKey(b []byte, pasteActive bool) (rune, []byte) { switch b[0] { case 1: // ^A return keyHome, b[1:] + case 2: // ^B + return keyLeft, b[1:] case 5: // ^E return keyEnd, b[1:] + case 6: // ^F + return keyRight, b[1:] case 8: // ^H return keyBackspace, b[1:] case 11: // ^K @@ -738,6 +743,9 @@ func (t *Terminal) readLine() (line string, err error) { return "", io.EOF } } + if key == keyCtrlC { + return "", io.EOF + } if key == keyPasteStart { t.pasteActive = true if len(t.line) == 0 { diff --git a/vendor/golang.org/x/lint/.travis.yml b/vendor/golang.org/x/lint/.travis.yml deleted file mode 100644 index 50553ebd0..000000000 --- a/vendor/golang.org/x/lint/.travis.yml +++ /dev/null @@ -1,19 +0,0 @@ -sudo: false -language: go -go: - - 1.10.x - - 1.11.x - - master - -go_import_path: golang.org/x/lint - -install: - - go get -t -v ./... - -script: - - go test -v -race ./... - -matrix: - allow_failures: - - go: master - fast_finish: true diff --git a/vendor/golang.org/x/lint/CONTRIBUTING.md b/vendor/golang.org/x/lint/CONTRIBUTING.md deleted file mode 100644 index 1fadda62d..000000000 --- a/vendor/golang.org/x/lint/CONTRIBUTING.md +++ /dev/null @@ -1,15 +0,0 @@ -# Contributing to Golint - -## Before filing an issue: - -### Are you having trouble building golint? - -Check you have the latest version of its dependencies. Run -``` -go get -u golang.org/x/lint/golint -``` -If you still have problems, consider searching for existing issues before filing a new issue. - -## Before sending a pull request: - -Have you understood the purpose of golint? Make sure to carefully read `README`. diff --git a/vendor/golang.org/x/lint/README.md b/vendor/golang.org/x/lint/README.md deleted file mode 100644 index 4968b13ae..000000000 --- a/vendor/golang.org/x/lint/README.md +++ /dev/null @@ -1,88 +0,0 @@ -Golint is a linter for Go source code. - -[![Build Status](https://travis-ci.org/golang/lint.svg?branch=master)](https://travis-ci.org/golang/lint) - -## Installation - -Golint requires a -[supported release of Go](https://golang.org/doc/devel/release.html#policy). - - go get -u golang.org/x/lint/golint - -To find out where `golint` was installed you can run `go list -f {{.Target}} golang.org/x/lint/golint`. For `golint` to be used globally add that directory to the `$PATH` environment setting. - -## Usage - -Invoke `golint` with one or more filenames, directories, or packages named -by its import path. 
Golint uses the same -[import path syntax](https://golang.org/cmd/go/#hdr-Import_path_syntax) as -the `go` command and therefore -also supports relative import paths like `./...`. Additionally the `...` -wildcard can be used as suffix on relative and absolute file paths to recurse -into them. - -The output of this tool is a list of suggestions in Vim quickfix format, -which is accepted by lots of different editors. - -## Purpose - -Golint differs from gofmt. Gofmt reformats Go source code, whereas -golint prints out style mistakes. - -Golint differs from govet. Govet is concerned with correctness, whereas -golint is concerned with coding style. Golint is in use at Google, and it -seeks to match the accepted style of the open source Go project. - -The suggestions made by golint are exactly that: suggestions. -Golint is not perfect, and has both false positives and false negatives. -Do not treat its output as a gold standard. We will not be adding pragmas -or other knobs to suppress specific warnings, so do not expect or require -code to be completely "lint-free". -In short, this tool is not, and will never be, trustworthy enough for its -suggestions to be enforced automatically, for example as part of a build process. -Golint makes suggestions for many of the mechanically checkable items listed in -[Effective Go](https://golang.org/doc/effective_go.html) and the -[CodeReviewComments wiki page](https://golang.org/wiki/CodeReviewComments). - -## Scope - -Golint is meant to carry out the stylistic conventions put forth in -[Effective Go](https://golang.org/doc/effective_go.html) and -[CodeReviewComments](https://golang.org/wiki/CodeReviewComments). -Changes that are not aligned with those documents will not be considered. - -## Contributions - -Contributions to this project are welcome provided they are [in scope](#scope), -though please send mail before starting work on anything major. -Contributors retain their copyright, so we need you to fill out -[a short form](https://developers.google.com/open-source/cla/individual) -before we can accept your contribution. - -## Vim - -Add this to your ~/.vimrc: - - set rtp+=$GOPATH/src/golang.org/x/lint/misc/vim - -If you have multiple entries in your GOPATH, replace `$GOPATH` with the right value. - -Running `:Lint` will run golint on the current file and populate the quickfix list. - -Optionally, add this to your `~/.vimrc` to automatically run `golint` on `:w` - - autocmd BufWritePost,FileWritePost *.go execute 'Lint' | cwindow - - -## Emacs - -Add this to your `.emacs` file: - - (add-to-list 'load-path (concat (getenv "GOPATH") "/src/golang.org/x/lint/misc/emacs/")) - (require 'golint) - -If you have multiple entries in your GOPATH, replace `$GOPATH` with the right value. - -Running M-x golint will run golint on the current file. - -For more usage, see [Compilation-Mode](http://www.gnu.org/software/emacs/manual/html_node/emacs/Compilation-Mode.html). 
diff --git a/vendor/golang.org/x/lint/go.mod b/vendor/golang.org/x/lint/go.mod deleted file mode 100644 index 44179f3a4..000000000 --- a/vendor/golang.org/x/lint/go.mod +++ /dev/null @@ -1,5 +0,0 @@ -module golang.org/x/lint - -go 1.11 - -require golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f diff --git a/vendor/golang.org/x/lint/go.sum b/vendor/golang.org/x/lint/go.sum deleted file mode 100644 index 539c98a94..000000000 --- a/vendor/golang.org/x/lint/go.sum +++ /dev/null @@ -1,8 +0,0 @@ -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f h1:kDxGY2VmgABOe55qheT/TFqUMtcTHnomIPS1iv3G4Ms= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/vendor/golang.org/x/lint/golint/golint.go b/vendor/golang.org/x/lint/golint/golint.go deleted file mode 100644 index ac024b6d2..000000000 --- a/vendor/golang.org/x/lint/golint/golint.go +++ /dev/null @@ -1,159 +0,0 @@ -// Copyright (c) 2013 The Go Authors. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file or at -// https://developers.google.com/open-source/licenses/bsd. - -// golint lints the Go source files named on its command line. -package main - -import ( - "flag" - "fmt" - "go/build" - "io/ioutil" - "os" - "path/filepath" - "strings" - - "golang.org/x/lint" -) - -var ( - minConfidence = flag.Float64("min_confidence", 0.8, "minimum confidence of a problem to print it") - setExitStatus = flag.Bool("set_exit_status", false, "set exit status to 1 if any issues are found") - suggestions int -) - -func usage() { - fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0]) - fmt.Fprintf(os.Stderr, "\tgolint [flags] # runs on package in current directory\n") - fmt.Fprintf(os.Stderr, "\tgolint [flags] [packages]\n") - fmt.Fprintf(os.Stderr, "\tgolint [flags] [directories] # where a '/...' suffix includes all sub-directories\n") - fmt.Fprintf(os.Stderr, "\tgolint [flags] [files] # all must belong to a single package\n") - fmt.Fprintf(os.Stderr, "Flags:\n") - flag.PrintDefaults() -} - -func main() { - flag.Usage = usage - flag.Parse() - - if flag.NArg() == 0 { - lintDir(".") - } else { - // dirsRun, filesRun, and pkgsRun indicate whether golint is applied to - // directory, file or package targets. The distinction affects which - // checks are run. It is no valid to mix target types. 
- var dirsRun, filesRun, pkgsRun int - var args []string - for _, arg := range flag.Args() { - if strings.HasSuffix(arg, "/...") && isDir(arg[:len(arg)-len("/...")]) { - dirsRun = 1 - for _, dirname := range allPackagesInFS(arg) { - args = append(args, dirname) - } - } else if isDir(arg) { - dirsRun = 1 - args = append(args, arg) - } else if exists(arg) { - filesRun = 1 - args = append(args, arg) - } else { - pkgsRun = 1 - args = append(args, arg) - } - } - - if dirsRun+filesRun+pkgsRun != 1 { - usage() - os.Exit(2) - } - switch { - case dirsRun == 1: - for _, dir := range args { - lintDir(dir) - } - case filesRun == 1: - lintFiles(args...) - case pkgsRun == 1: - for _, pkg := range importPaths(args) { - lintPackage(pkg) - } - } - } - - if *setExitStatus && suggestions > 0 { - fmt.Fprintf(os.Stderr, "Found %d lint suggestions; failing.\n", suggestions) - os.Exit(1) - } -} - -func isDir(filename string) bool { - fi, err := os.Stat(filename) - return err == nil && fi.IsDir() -} - -func exists(filename string) bool { - _, err := os.Stat(filename) - return err == nil -} - -func lintFiles(filenames ...string) { - files := make(map[string][]byte) - for _, filename := range filenames { - src, err := ioutil.ReadFile(filename) - if err != nil { - fmt.Fprintln(os.Stderr, err) - continue - } - files[filename] = src - } - - l := new(lint.Linter) - ps, err := l.LintFiles(files) - if err != nil { - fmt.Fprintf(os.Stderr, "%v\n", err) - return - } - for _, p := range ps { - if p.Confidence >= *minConfidence { - fmt.Printf("%v: %s\n", p.Position, p.Text) - suggestions++ - } - } -} - -func lintDir(dirname string) { - pkg, err := build.ImportDir(dirname, 0) - lintImportedPackage(pkg, err) -} - -func lintPackage(pkgname string) { - pkg, err := build.Import(pkgname, ".", 0) - lintImportedPackage(pkg, err) -} - -func lintImportedPackage(pkg *build.Package, err error) { - if err != nil { - if _, nogo := err.(*build.NoGoError); nogo { - // Don't complain if the failure is due to no Go source files. - return - } - fmt.Fprintln(os.Stderr, err) - return - } - - var files []string - files = append(files, pkg.GoFiles...) - files = append(files, pkg.CgoFiles...) - files = append(files, pkg.TestGoFiles...) - if pkg.Dir != "." { - for i, f := range files { - files[i] = filepath.Join(pkg.Dir, f) - } - } - // TODO(dsymonds): Do foo_test too (pkg.XTestGoFiles) - - lintFiles(files...) -} diff --git a/vendor/golang.org/x/lint/golint/import.go b/vendor/golang.org/x/lint/golint/import.go deleted file mode 100644 index 2ba9dea77..000000000 --- a/vendor/golang.org/x/lint/golint/import.go +++ /dev/null @@ -1,309 +0,0 @@ -package main - -/* - -This file holds a direct copy of the import path matching code of -https://github.com/golang/go/blob/master/src/cmd/go/main.go. It can be -replaced when https://golang.org/issue/8768 is resolved. - -It has been updated to follow upstream changes in a few ways. - -*/ - -import ( - "fmt" - "go/build" - "log" - "os" - "path" - "path/filepath" - "regexp" - "runtime" - "strings" -) - -var ( - buildContext = build.Default - goroot = filepath.Clean(runtime.GOROOT()) - gorootSrc = filepath.Join(goroot, "src") -) - -// importPathsNoDotExpansion returns the import paths to use for the given -// command line, but it does no ... expansion. 
-func importPathsNoDotExpansion(args []string) []string { - if len(args) == 0 { - return []string{"."} - } - var out []string - for _, a := range args { - // Arguments are supposed to be import paths, but - // as a courtesy to Windows developers, rewrite \ to / - // in command-line arguments. Handles .\... and so on. - if filepath.Separator == '\\' { - a = strings.Replace(a, `\`, `/`, -1) - } - - // Put argument in canonical form, but preserve leading ./. - if strings.HasPrefix(a, "./") { - a = "./" + path.Clean(a) - if a == "./." { - a = "." - } - } else { - a = path.Clean(a) - } - if a == "all" || a == "std" { - out = append(out, allPackages(a)...) - continue - } - out = append(out, a) - } - return out -} - -// importPaths returns the import paths to use for the given command line. -func importPaths(args []string) []string { - args = importPathsNoDotExpansion(args) - var out []string - for _, a := range args { - if strings.Contains(a, "...") { - if build.IsLocalImport(a) { - out = append(out, allPackagesInFS(a)...) - } else { - out = append(out, allPackages(a)...) - } - continue - } - out = append(out, a) - } - return out -} - -// matchPattern(pattern)(name) reports whether -// name matches pattern. Pattern is a limited glob -// pattern in which '...' means 'any string' and there -// is no other special syntax. -func matchPattern(pattern string) func(name string) bool { - re := regexp.QuoteMeta(pattern) - re = strings.Replace(re, `\.\.\.`, `.*`, -1) - // Special case: foo/... matches foo too. - if strings.HasSuffix(re, `/.*`) { - re = re[:len(re)-len(`/.*`)] + `(/.*)?` - } - reg := regexp.MustCompile(`^` + re + `$`) - return func(name string) bool { - return reg.MatchString(name) - } -} - -// hasPathPrefix reports whether the path s begins with the -// elements in prefix. -func hasPathPrefix(s, prefix string) bool { - switch { - default: - return false - case len(s) == len(prefix): - return s == prefix - case len(s) > len(prefix): - if prefix != "" && prefix[len(prefix)-1] == '/' { - return strings.HasPrefix(s, prefix) - } - return s[len(prefix)] == '/' && s[:len(prefix)] == prefix - } -} - -// treeCanMatchPattern(pattern)(name) reports whether -// name or children of name can possibly match pattern. -// Pattern is the same limited glob accepted by matchPattern. -func treeCanMatchPattern(pattern string) func(name string) bool { - wildCard := false - if i := strings.Index(pattern, "..."); i >= 0 { - wildCard = true - pattern = pattern[:i] - } - return func(name string) bool { - return len(name) <= len(pattern) && hasPathPrefix(pattern, name) || - wildCard && strings.HasPrefix(name, pattern) - } -} - -// allPackages returns all the packages that can be found -// under the $GOPATH directories and $GOROOT matching pattern. -// The pattern is either "all" (all packages), "std" (standard packages) -// or a path including "...". 
-func allPackages(pattern string) []string { - pkgs := matchPackages(pattern) - if len(pkgs) == 0 { - fmt.Fprintf(os.Stderr, "warning: %q matched no packages\n", pattern) - } - return pkgs -} - -func matchPackages(pattern string) []string { - match := func(string) bool { return true } - treeCanMatch := func(string) bool { return true } - if pattern != "all" && pattern != "std" { - match = matchPattern(pattern) - treeCanMatch = treeCanMatchPattern(pattern) - } - - have := map[string]bool{ - "builtin": true, // ignore pseudo-package that exists only for documentation - } - if !buildContext.CgoEnabled { - have["runtime/cgo"] = true // ignore during walk - } - var pkgs []string - - // Commands - cmd := filepath.Join(goroot, "src/cmd") + string(filepath.Separator) - filepath.Walk(cmd, func(path string, fi os.FileInfo, err error) error { - if err != nil || !fi.IsDir() || path == cmd { - return nil - } - name := path[len(cmd):] - if !treeCanMatch(name) { - return filepath.SkipDir - } - // Commands are all in cmd/, not in subdirectories. - if strings.Contains(name, string(filepath.Separator)) { - return filepath.SkipDir - } - - // We use, e.g., cmd/gofmt as the pseudo import path for gofmt. - name = "cmd/" + name - if have[name] { - return nil - } - have[name] = true - if !match(name) { - return nil - } - _, err = buildContext.ImportDir(path, 0) - if err != nil { - if _, noGo := err.(*build.NoGoError); !noGo { - log.Print(err) - } - return nil - } - pkgs = append(pkgs, name) - return nil - }) - - for _, src := range buildContext.SrcDirs() { - if (pattern == "std" || pattern == "cmd") && src != gorootSrc { - continue - } - src = filepath.Clean(src) + string(filepath.Separator) - root := src - if pattern == "cmd" { - root += "cmd" + string(filepath.Separator) - } - filepath.Walk(root, func(path string, fi os.FileInfo, err error) error { - if err != nil || !fi.IsDir() || path == src { - return nil - } - - // Avoid .foo, _foo, and testdata directory trees. - _, elem := filepath.Split(path) - if strings.HasPrefix(elem, ".") || strings.HasPrefix(elem, "_") || elem == "testdata" { - return filepath.SkipDir - } - - name := filepath.ToSlash(path[len(src):]) - if pattern == "std" && (strings.Contains(name, ".") || name == "cmd") { - // The name "std" is only the standard library. - // If the name is cmd, it's the root of the command tree. - return filepath.SkipDir - } - if !treeCanMatch(name) { - return filepath.SkipDir - } - if have[name] { - return nil - } - have[name] = true - if !match(name) { - return nil - } - _, err = buildContext.ImportDir(path, 0) - if err != nil { - if _, noGo := err.(*build.NoGoError); noGo { - return nil - } - } - pkgs = append(pkgs, name) - return nil - }) - } - return pkgs -} - -// allPackagesInFS is like allPackages but is passed a pattern -// beginning ./ or ../, meaning it should scan the tree rooted -// at the given directory. There are ... in the pattern too. -func allPackagesInFS(pattern string) []string { - pkgs := matchPackagesInFS(pattern) - if len(pkgs) == 0 { - fmt.Fprintf(os.Stderr, "warning: %q matched no packages\n", pattern) - } - return pkgs -} - -func matchPackagesInFS(pattern string) []string { - // Find directory to begin the scan. - // Could be smarter but this one optimization - // is enough for now, since ... is usually at the - // end of a path. - i := strings.Index(pattern, "...") - dir, _ := path.Split(pattern[:i]) - - // pattern begins with ./ or ../. - // path.Clean will discard the ./ but not the ../. 
- // We need to preserve the ./ for pattern matching - // and in the returned import paths. - prefix := "" - if strings.HasPrefix(pattern, "./") { - prefix = "./" - } - match := matchPattern(pattern) - - var pkgs []string - filepath.Walk(dir, func(path string, fi os.FileInfo, err error) error { - if err != nil || !fi.IsDir() { - return nil - } - if path == dir { - // filepath.Walk starts at dir and recurses. For the recursive case, - // the path is the result of filepath.Join, which calls filepath.Clean. - // The initial case is not Cleaned, though, so we do this explicitly. - // - // This converts a path like "./io/" to "io". Without this step, running - // "cd $GOROOT/src/pkg; go list ./io/..." would incorrectly skip the io - // package, because prepending the prefix "./" to the unclean path would - // result in "././io", and match("././io") returns false. - path = filepath.Clean(path) - } - - // Avoid .foo, _foo, and testdata directory trees, but do not avoid "." or "..". - _, elem := filepath.Split(path) - dot := strings.HasPrefix(elem, ".") && elem != "." && elem != ".." - if dot || strings.HasPrefix(elem, "_") || elem == "testdata" { - return filepath.SkipDir - } - - name := prefix + filepath.ToSlash(path) - if !match(name) { - return nil - } - if _, err = build.ImportDir(path, 0); err != nil { - if _, noGo := err.(*build.NoGoError); !noGo { - log.Print(err) - } - return nil - } - pkgs = append(pkgs, name) - return nil - }) - return pkgs -} diff --git a/vendor/golang.org/x/lint/golint/importcomment.go b/vendor/golang.org/x/lint/golint/importcomment.go deleted file mode 100644 index d5b32f734..000000000 --- a/vendor/golang.org/x/lint/golint/importcomment.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright (c) 2018 The Go Authors. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file or at -// https://developers.google.com/open-source/licenses/bsd. - -// +build go1.12 - -// Require use of the correct import path only for Go 1.12+ users, so -// any breakages coincide with people updating their CI configs or -// whatnot. - -package main // import "golang.org/x/lint/golint" diff --git a/vendor/golang.org/x/lint/lint.go b/vendor/golang.org/x/lint/lint.go deleted file mode 100644 index 532a75ad2..000000000 --- a/vendor/golang.org/x/lint/lint.go +++ /dev/null @@ -1,1614 +0,0 @@ -// Copyright (c) 2013 The Go Authors. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file or at -// https://developers.google.com/open-source/licenses/bsd. - -// Package lint contains a linter for Go source code. -package lint // import "golang.org/x/lint" - -import ( - "bufio" - "bytes" - "fmt" - "go/ast" - "go/parser" - "go/printer" - "go/token" - "go/types" - "regexp" - "sort" - "strconv" - "strings" - "unicode" - "unicode/utf8" - - "golang.org/x/tools/go/ast/astutil" - "golang.org/x/tools/go/gcexportdata" -) - -const styleGuideBase = "https://golang.org/wiki/CodeReviewComments" - -// A Linter lints Go source code. -type Linter struct { -} - -// Problem represents a problem in some source code. 
-type Problem struct { - Position token.Position // position in source file - Text string // the prose that describes the problem - Link string // (optional) the link to the style guide for the problem - Confidence float64 // a value in (0,1] estimating the confidence in this problem's correctness - LineText string // the source line - Category string // a short name for the general category of the problem - - // If the problem has a suggested fix (the minority case), - // ReplacementLine is a full replacement for the relevant line of the source file. - ReplacementLine string -} - -func (p *Problem) String() string { - if p.Link != "" { - return p.Text + "\n\n" + p.Link - } - return p.Text -} - -type byPosition []Problem - -func (p byPosition) Len() int { return len(p) } -func (p byPosition) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -func (p byPosition) Less(i, j int) bool { - pi, pj := p[i].Position, p[j].Position - - if pi.Filename != pj.Filename { - return pi.Filename < pj.Filename - } - if pi.Line != pj.Line { - return pi.Line < pj.Line - } - if pi.Column != pj.Column { - return pi.Column < pj.Column - } - - return p[i].Text < p[j].Text -} - -// Lint lints src. -func (l *Linter) Lint(filename string, src []byte) ([]Problem, error) { - return l.LintFiles(map[string][]byte{filename: src}) -} - -// LintFiles lints a set of files of a single package. -// The argument is a map of filename to source. -func (l *Linter) LintFiles(files map[string][]byte) ([]Problem, error) { - pkg := &pkg{ - fset: token.NewFileSet(), - files: make(map[string]*file), - } - var pkgName string - for filename, src := range files { - if isGenerated(src) { - continue // See issue #239 - } - f, err := parser.ParseFile(pkg.fset, filename, src, parser.ParseComments) - if err != nil { - return nil, err - } - if pkgName == "" { - pkgName = f.Name.Name - } else if f.Name.Name != pkgName { - return nil, fmt.Errorf("%s is in package %s, not %s", filename, f.Name.Name, pkgName) - } - pkg.files[filename] = &file{ - pkg: pkg, - f: f, - fset: pkg.fset, - src: src, - filename: filename, - } - } - if len(pkg.files) == 0 { - return nil, nil - } - return pkg.lint(), nil -} - -var ( - genHdr = []byte("// Code generated ") - genFtr = []byte(" DO NOT EDIT.") -) - -// isGenerated reports whether the source file is generated code -// according the rules from https://golang.org/s/generatedcode. -func isGenerated(src []byte) bool { - sc := bufio.NewScanner(bytes.NewReader(src)) - for sc.Scan() { - b := sc.Bytes() - if bytes.HasPrefix(b, genHdr) && bytes.HasSuffix(b, genFtr) && len(b) >= len(genHdr)+len(genFtr) { - return true - } - } - return false -} - -// pkg represents a package being linted. -type pkg struct { - fset *token.FileSet - files map[string]*file - - typesPkg *types.Package - typesInfo *types.Info - - // sortable is the set of types in the package that implement sort.Interface. - sortable map[string]bool - // main is whether this is a "main" package. - main bool - - problems []Problem -} - -func (p *pkg) lint() []Problem { - if err := p.typeCheck(); err != nil { - /* TODO(dsymonds): Consider reporting these errors when golint operates on entire packages. - if e, ok := err.(types.Error); ok { - pos := p.fset.Position(e.Pos) - conf := 1.0 - if strings.Contains(e.Msg, "can't find import: ") { - // Golint is probably being run in a context that doesn't support - // typechecking (e.g. package files aren't found), so don't warn about it. 
- conf = 0 - } - if conf > 0 { - p.errorfAt(pos, conf, category("typechecking"), e.Msg) - } - - // TODO(dsymonds): Abort if !e.Soft? - } - */ - } - - p.scanSortable() - p.main = p.isMain() - - for _, f := range p.files { - f.lint() - } - - sort.Sort(byPosition(p.problems)) - - return p.problems -} - -// file represents a file being linted. -type file struct { - pkg *pkg - f *ast.File - fset *token.FileSet - src []byte - filename string -} - -func (f *file) isTest() bool { return strings.HasSuffix(f.filename, "_test.go") } - -func (f *file) lint() { - f.lintPackageComment() - f.lintImports() - f.lintBlankImports() - f.lintExported() - f.lintNames() - f.lintElses() - f.lintRanges() - f.lintErrorf() - f.lintErrors() - f.lintErrorStrings() - f.lintReceiverNames() - f.lintIncDec() - f.lintErrorReturn() - f.lintUnexportedReturn() - f.lintTimeNames() - f.lintContextKeyTypes() - f.lintContextArgs() -} - -type link string -type category string - -// The variadic arguments may start with link and category types, -// and must end with a format string and any arguments. -// It returns the new Problem. -func (f *file) errorf(n ast.Node, confidence float64, args ...interface{}) *Problem { - pos := f.fset.Position(n.Pos()) - if pos.Filename == "" { - pos.Filename = f.filename - } - return f.pkg.errorfAt(pos, confidence, args...) -} - -func (p *pkg) errorfAt(pos token.Position, confidence float64, args ...interface{}) *Problem { - problem := Problem{ - Position: pos, - Confidence: confidence, - } - if pos.Filename != "" { - // The file might not exist in our mapping if a //line directive was encountered. - if f, ok := p.files[pos.Filename]; ok { - problem.LineText = srcLine(f.src, pos) - } - } - -argLoop: - for len(args) > 1 { // always leave at least the format string in args - switch v := args[0].(type) { - case link: - problem.Link = string(v) - case category: - problem.Category = string(v) - default: - break argLoop - } - args = args[1:] - } - - problem.Text = fmt.Sprintf(args[0].(string), args[1:]...) - - p.problems = append(p.problems, problem) - return &p.problems[len(p.problems)-1] -} - -var newImporter = func(fset *token.FileSet) types.ImporterFrom { - return gcexportdata.NewImporter(fset, make(map[string]*types.Package)) -} - -func (p *pkg) typeCheck() error { - config := &types.Config{ - // By setting a no-op error reporter, the type checker does as much work as possible. - Error: func(error) {}, - Importer: newImporter(p.fset), - } - info := &types.Info{ - Types: make(map[ast.Expr]types.TypeAndValue), - Defs: make(map[*ast.Ident]types.Object), - Uses: make(map[*ast.Ident]types.Object), - Scopes: make(map[ast.Node]*types.Scope), - } - var anyFile *file - var astFiles []*ast.File - for _, f := range p.files { - anyFile = f - astFiles = append(astFiles, f.f) - } - pkg, err := config.Check(anyFile.f.Name.Name, p.fset, astFiles, info) - // Remember the typechecking info, even if config.Check failed, - // since we will get partial information. - p.typesPkg = pkg - p.typesInfo = info - return err -} - -func (p *pkg) typeOf(expr ast.Expr) types.Type { - if p.typesInfo == nil { - return nil - } - return p.typesInfo.TypeOf(expr) -} - -func (p *pkg) isNamedType(typ types.Type, importPath, name string) bool { - n, ok := typ.(*types.Named) - if !ok { - return false - } - tn := n.Obj() - return tn != nil && tn.Pkg() != nil && tn.Pkg().Path() == importPath && tn.Name() == name -} - -// scopeOf returns the tightest scope encompassing id. 
-func (p *pkg) scopeOf(id *ast.Ident) *types.Scope { - var scope *types.Scope - if obj := p.typesInfo.ObjectOf(id); obj != nil { - scope = obj.Parent() - } - if scope == p.typesPkg.Scope() { - // We were given a top-level identifier. - // Use the file-level scope instead of the package-level scope. - pos := id.Pos() - for _, f := range p.files { - if f.f.Pos() <= pos && pos < f.f.End() { - scope = p.typesInfo.Scopes[f.f] - break - } - } - } - return scope -} - -func (p *pkg) scanSortable() { - p.sortable = make(map[string]bool) - - // bitfield for which methods exist on each type. - const ( - Len = 1 << iota - Less - Swap - ) - nmap := map[string]int{"Len": Len, "Less": Less, "Swap": Swap} - has := make(map[string]int) - for _, f := range p.files { - f.walk(func(n ast.Node) bool { - fn, ok := n.(*ast.FuncDecl) - if !ok || fn.Recv == nil || len(fn.Recv.List) == 0 { - return true - } - // TODO(dsymonds): We could check the signature to be more precise. - recv := receiverType(fn) - if i, ok := nmap[fn.Name.Name]; ok { - has[recv] |= i - } - return false - }) - } - for typ, ms := range has { - if ms == Len|Less|Swap { - p.sortable[typ] = true - } - } -} - -func (p *pkg) isMain() bool { - for _, f := range p.files { - if f.isMain() { - return true - } - } - return false -} - -func (f *file) isMain() bool { - if f.f.Name.Name == "main" { - return true - } - return false -} - -// lintPackageComment checks package comments. It complains if -// there is no package comment, or if it is not of the right form. -// This has a notable false positive in that a package comment -// could rightfully appear in a different file of the same package, -// but that's not easy to fix since this linter is file-oriented. -func (f *file) lintPackageComment() { - if f.isTest() { - return - } - - const ref = styleGuideBase + "#package-comments" - prefix := "Package " + f.f.Name.Name + " " - - // Look for a detached package comment. - // First, scan for the last comment that occurs before the "package" keyword. - var lastCG *ast.CommentGroup - for _, cg := range f.f.Comments { - if cg.Pos() > f.f.Package { - // Gone past "package" keyword. - break - } - lastCG = cg - } - if lastCG != nil && strings.HasPrefix(lastCG.Text(), prefix) { - endPos := f.fset.Position(lastCG.End()) - pkgPos := f.fset.Position(f.f.Package) - if endPos.Line+1 < pkgPos.Line { - // There isn't a great place to anchor this error; - // the start of the blank lines between the doc and the package statement - // is at least pointing at the location of the problem. - pos := token.Position{ - Filename: endPos.Filename, - // Offset not set; it is non-trivial, and doesn't appear to be needed. - Line: endPos.Line + 1, - Column: 1, - } - f.pkg.errorfAt(pos, 0.9, link(ref), category("comments"), "package comment is detached; there should be no blank lines between it and the package statement") - return - } - } - - if f.f.Doc == nil { - f.errorf(f.f, 0.2, link(ref), category("comments"), "should have a package comment, unless it's in another file for this package") - return - } - s := f.f.Doc.Text() - if ts := strings.TrimLeft(s, " \t"); ts != s { - f.errorf(f.f.Doc, 1, link(ref), category("comments"), "package comment should not have leading space") - s = ts - } - // Only non-main packages need to keep to this form. 
- if !f.pkg.main && !strings.HasPrefix(s, prefix) { - f.errorf(f.f.Doc, 1, link(ref), category("comments"), `package comment should be of the form "%s..."`, prefix) - } -} - -// lintBlankImports complains if a non-main package has blank imports that are -// not documented. -func (f *file) lintBlankImports() { - // In package main and in tests, we don't complain about blank imports. - if f.pkg.main || f.isTest() { - return - } - - // The first element of each contiguous group of blank imports should have - // an explanatory comment of some kind. - for i, imp := range f.f.Imports { - pos := f.fset.Position(imp.Pos()) - - if !isBlank(imp.Name) { - continue // Ignore non-blank imports. - } - if i > 0 { - prev := f.f.Imports[i-1] - prevPos := f.fset.Position(prev.Pos()) - if isBlank(prev.Name) && prevPos.Line+1 == pos.Line { - continue // A subsequent blank in a group. - } - } - - // This is the first blank import of a group. - if imp.Doc == nil && imp.Comment == nil { - ref := "" - f.errorf(imp, 1, link(ref), category("imports"), "a blank import should be only in a main or test package, or have a comment justifying it") - } - } -} - -// lintImports examines import blocks. -func (f *file) lintImports() { - for i, is := range f.f.Imports { - _ = i - if is.Name != nil && is.Name.Name == "." && !f.isTest() { - f.errorf(is, 1, link(styleGuideBase+"#import-dot"), category("imports"), "should not use dot imports") - } - - } -} - -const docCommentsLink = styleGuideBase + "#doc-comments" - -// lintExported examines the exported names. -// It complains if any required doc comments are missing, -// or if they are not of the right form. The exact rules are in -// lintFuncDoc, lintTypeDoc and lintValueSpecDoc; this function -// also tracks the GenDecl structure being traversed to permit -// doc comments for constants to be on top of the const block. -// It also complains if the names stutter when combined with -// the package name. -func (f *file) lintExported() { - if f.isTest() { - return - } - - var lastGen *ast.GenDecl // last GenDecl entered. - - // Set of GenDecls that have already had missing comments flagged. - genDeclMissingComments := make(map[*ast.GenDecl]bool) - - f.walk(func(node ast.Node) bool { - switch v := node.(type) { - case *ast.GenDecl: - if v.Tok == token.IMPORT { - return false - } - // token.CONST, token.TYPE or token.VAR - lastGen = v - return true - case *ast.FuncDecl: - f.lintFuncDoc(v) - if v.Recv == nil { - // Only check for stutter on functions, not methods. - // Method names are not used package-qualified. - f.checkStutter(v.Name, "func") - } - // Don't proceed inside funcs. - return false - case *ast.TypeSpec: - // inside a GenDecl, which usually has the doc - doc := v.Doc - if doc == nil { - doc = lastGen.Doc - } - f.lintTypeDoc(v, doc) - f.checkStutter(v.Name, "type") - // Don't proceed inside types. - return false - case *ast.ValueSpec: - f.lintValueSpecDoc(v, lastGen, genDeclMissingComments) - return false - } - return true - }) -} - -var ( - allCapsRE = regexp.MustCompile(`^[A-Z0-9_]+$`) - anyCapsRE = regexp.MustCompile(`[A-Z]`) -) - -// knownNameExceptions is a set of names that are known to be exempt from naming checks. -// This is usually because they are constrained by having to match names in the -// standard library. 
-var knownNameExceptions = map[string]bool{ - "LastInsertId": true, // must match database/sql - "kWh": true, -} - -func isInTopLevel(f *ast.File, ident *ast.Ident) bool { - path, _ := astutil.PathEnclosingInterval(f, ident.Pos(), ident.End()) - for _, f := range path { - switch f.(type) { - case *ast.File, *ast.GenDecl, *ast.ValueSpec, *ast.Ident: - continue - } - return false - } - return true -} - -// lintNames examines all names in the file. -// It complains if any use underscores or incorrect known initialisms. -func (f *file) lintNames() { - // Package names need slightly different handling than other names. - if strings.Contains(f.f.Name.Name, "_") && !strings.HasSuffix(f.f.Name.Name, "_test") { - f.errorf(f.f, 1, link("http://golang.org/doc/effective_go.html#package-names"), category("naming"), "don't use an underscore in package name") - } - if anyCapsRE.MatchString(f.f.Name.Name) { - f.errorf(f.f, 1, link("http://golang.org/doc/effective_go.html#package-names"), category("mixed-caps"), "don't use MixedCaps in package name; %s should be %s", f.f.Name.Name, strings.ToLower(f.f.Name.Name)) - } - - check := func(id *ast.Ident, thing string) { - if id.Name == "_" { - return - } - if knownNameExceptions[id.Name] { - return - } - - // Handle two common styles from other languages that don't belong in Go. - if len(id.Name) >= 5 && allCapsRE.MatchString(id.Name) && strings.Contains(id.Name, "_") { - capCount := 0 - for _, c := range id.Name { - if 'A' <= c && c <= 'Z' { - capCount++ - } - } - if capCount >= 2 { - f.errorf(id, 0.8, link(styleGuideBase+"#mixed-caps"), category("naming"), "don't use ALL_CAPS in Go names; use CamelCase") - return - } - } - if thing == "const" || (thing == "var" && isInTopLevel(f.f, id)) { - if len(id.Name) > 2 && id.Name[0] == 'k' && id.Name[1] >= 'A' && id.Name[1] <= 'Z' { - should := string(id.Name[1]+'a'-'A') + id.Name[2:] - f.errorf(id, 0.8, link(styleGuideBase+"#mixed-caps"), category("naming"), "don't use leading k in Go names; %s %s should be %s", thing, id.Name, should) - } - } - - should := lintName(id.Name) - if id.Name == should { - return - } - - if len(id.Name) > 2 && strings.Contains(id.Name[1:], "_") { - f.errorf(id, 0.9, link("http://golang.org/doc/effective_go.html#mixed-caps"), category("naming"), "don't use underscores in Go names; %s %s should be %s", thing, id.Name, should) - return - } - f.errorf(id, 0.8, link(styleGuideBase+"#initialisms"), category("naming"), "%s %s should be %s", thing, id.Name, should) - } - checkList := func(fl *ast.FieldList, thing string) { - if fl == nil { - return - } - for _, f := range fl.List { - for _, id := range f.Names { - check(id, thing) - } - } - } - f.walk(func(node ast.Node) bool { - switch v := node.(type) { - case *ast.AssignStmt: - if v.Tok == token.ASSIGN { - return true - } - for _, exp := range v.Lhs { - if id, ok := exp.(*ast.Ident); ok { - check(id, "var") - } - } - case *ast.FuncDecl: - if f.isTest() && (strings.HasPrefix(v.Name.Name, "Example") || strings.HasPrefix(v.Name.Name, "Test") || strings.HasPrefix(v.Name.Name, "Benchmark")) { - return true - } - - thing := "func" - if v.Recv != nil { - thing = "method" - } - - // Exclude naming warnings for functions that are exported to C but - // not exported in the Go API. - // See https://github.com/golang/lint/issues/144. 
- if ast.IsExported(v.Name.Name) || !isCgoExported(v) { - check(v.Name, thing) - } - - checkList(v.Type.Params, thing+" parameter") - checkList(v.Type.Results, thing+" result") - case *ast.GenDecl: - if v.Tok == token.IMPORT { - return true - } - var thing string - switch v.Tok { - case token.CONST: - thing = "const" - case token.TYPE: - thing = "type" - case token.VAR: - thing = "var" - } - for _, spec := range v.Specs { - switch s := spec.(type) { - case *ast.TypeSpec: - check(s.Name, thing) - case *ast.ValueSpec: - for _, id := range s.Names { - check(id, thing) - } - } - } - case *ast.InterfaceType: - // Do not check interface method names. - // They are often constrainted by the method names of concrete types. - for _, x := range v.Methods.List { - ft, ok := x.Type.(*ast.FuncType) - if !ok { // might be an embedded interface name - continue - } - checkList(ft.Params, "interface method parameter") - checkList(ft.Results, "interface method result") - } - case *ast.RangeStmt: - if v.Tok == token.ASSIGN { - return true - } - if id, ok := v.Key.(*ast.Ident); ok { - check(id, "range var") - } - if id, ok := v.Value.(*ast.Ident); ok { - check(id, "range var") - } - case *ast.StructType: - for _, f := range v.Fields.List { - for _, id := range f.Names { - check(id, "struct field") - } - } - } - return true - }) -} - -// lintName returns a different name if it should be different. -func lintName(name string) (should string) { - // Fast path for simple cases: "_" and all lowercase. - if name == "_" { - return name - } - allLower := true - for _, r := range name { - if !unicode.IsLower(r) { - allLower = false - break - } - } - if allLower { - return name - } - - // Split camelCase at any lower->upper transition, and split on underscores. - // Check each word for common initialisms. - runes := []rune(name) - w, i := 0, 0 // index of start of word, scan - for i+1 <= len(runes) { - eow := false // whether we hit the end of a word - if i+1 == len(runes) { - eow = true - } else if runes[i+1] == '_' { - // underscore; shift the remainder forward over any run of underscores - eow = true - n := 1 - for i+n+1 < len(runes) && runes[i+n+1] == '_' { - n++ - } - - // Leave at most one underscore if the underscore is between two digits - if i+n+1 < len(runes) && unicode.IsDigit(runes[i]) && unicode.IsDigit(runes[i+n+1]) { - n-- - } - - copy(runes[i+1:], runes[i+n+1:]) - runes = runes[:len(runes)-n] - } else if unicode.IsLower(runes[i]) && !unicode.IsLower(runes[i+1]) { - // lower->non-lower - eow = true - } - i++ - if !eow { - continue - } - - // [w,i) is a word. - word := string(runes[w:i]) - if u := strings.ToUpper(word); commonInitialisms[u] { - // Keep consistent case, which is lowercase only at the start. - if w == 0 && unicode.IsLower(runes[w]) { - u = strings.ToLower(u) - } - // All the common initialisms are ASCII, - // so we can replace the bytes exactly. - copy(runes[w:], []rune(u)) - } else if w > 0 && strings.ToLower(word) == word { - // already all lowercase, and not the first word, so uppercase the first character. - runes[w] = unicode.ToUpper(runes[w]) - } - w = i - } - return string(runes) -} - -// commonInitialisms is a set of common initialisms. -// Only add entries that are highly unlikely to be non-initialisms. -// For instance, "ID" is fine (Freudian code is rare), but "AND" is not. 
-var commonInitialisms = map[string]bool{ - "ACL": true, - "API": true, - "ASCII": true, - "CPU": true, - "CSS": true, - "DNS": true, - "EOF": true, - "GUID": true, - "HTML": true, - "HTTP": true, - "HTTPS": true, - "ID": true, - "IP": true, - "JSON": true, - "LHS": true, - "QPS": true, - "RAM": true, - "RHS": true, - "RPC": true, - "SLA": true, - "SMTP": true, - "SQL": true, - "SSH": true, - "TCP": true, - "TLS": true, - "TTL": true, - "UDP": true, - "UI": true, - "UID": true, - "UUID": true, - "URI": true, - "URL": true, - "UTF8": true, - "VM": true, - "XML": true, - "XMPP": true, - "XSRF": true, - "XSS": true, -} - -// lintTypeDoc examines the doc comment on a type. -// It complains if they are missing from an exported type, -// or if they are not of the standard form. -func (f *file) lintTypeDoc(t *ast.TypeSpec, doc *ast.CommentGroup) { - if !ast.IsExported(t.Name.Name) { - return - } - if doc == nil { - f.errorf(t, 1, link(docCommentsLink), category("comments"), "exported type %v should have comment or be unexported", t.Name) - return - } - - s := doc.Text() - articles := [...]string{"A", "An", "The"} - for _, a := range articles { - if strings.HasPrefix(s, a+" ") { - s = s[len(a)+1:] - break - } - } - if !strings.HasPrefix(s, t.Name.Name+" ") { - f.errorf(doc, 1, link(docCommentsLink), category("comments"), `comment on exported type %v should be of the form "%v ..." (with optional leading article)`, t.Name, t.Name) - } -} - -var commonMethods = map[string]bool{ - "Error": true, - "Read": true, - "ServeHTTP": true, - "String": true, - "Write": true, -} - -// lintFuncDoc examines doc comments on functions and methods. -// It complains if they are missing, or not of the right form. -// It has specific exclusions for well-known methods (see commonMethods above). -func (f *file) lintFuncDoc(fn *ast.FuncDecl) { - if !ast.IsExported(fn.Name.Name) { - // func is unexported - return - } - kind := "function" - name := fn.Name.Name - if fn.Recv != nil && len(fn.Recv.List) > 0 { - // method - kind = "method" - recv := receiverType(fn) - if !ast.IsExported(recv) { - // receiver is unexported - return - } - if commonMethods[name] { - return - } - switch name { - case "Len", "Less", "Swap": - if f.pkg.sortable[recv] { - return - } - } - name = recv + "." + name - } - if fn.Doc == nil { - f.errorf(fn, 1, link(docCommentsLink), category("comments"), "exported %s %s should have comment or be unexported", kind, name) - return - } - s := fn.Doc.Text() - prefix := fn.Name.Name + " " - if !strings.HasPrefix(s, prefix) { - f.errorf(fn.Doc, 1, link(docCommentsLink), category("comments"), `comment on exported %s %s should be of the form "%s..."`, kind, name, prefix) - } -} - -// lintValueSpecDoc examines package-global variables and constants. -// It complains if they are not individually declared, -// or if they are not suitably documented in the right form (unless they are in a block that is commented). -func (f *file) lintValueSpecDoc(vs *ast.ValueSpec, gd *ast.GenDecl, genDeclMissingComments map[*ast.GenDecl]bool) { - kind := "var" - if gd.Tok == token.CONST { - kind = "const" - } - - if len(vs.Names) > 1 { - // Check that none are exported except for the first. - for _, n := range vs.Names[1:] { - if ast.IsExported(n.Name) { - f.errorf(vs, 1, category("comments"), "exported %s %s should have its own declaration", kind, n.Name) - return - } - } - } - - // Only one name. 
- name := vs.Names[0].Name - if !ast.IsExported(name) { - return - } - - if vs.Doc == nil && gd.Doc == nil { - if genDeclMissingComments[gd] { - return - } - block := "" - if kind == "const" && gd.Lparen.IsValid() { - block = " (or a comment on this block)" - } - f.errorf(vs, 1, link(docCommentsLink), category("comments"), "exported %s %s should have comment%s or be unexported", kind, name, block) - genDeclMissingComments[gd] = true - return - } - // If this GenDecl has parens and a comment, we don't check its comment form. - if gd.Lparen.IsValid() && gd.Doc != nil { - return - } - // The relevant text to check will be on either vs.Doc or gd.Doc. - // Use vs.Doc preferentially. - doc := vs.Doc - if doc == nil { - doc = gd.Doc - } - prefix := name + " " - if !strings.HasPrefix(doc.Text(), prefix) { - f.errorf(doc, 1, link(docCommentsLink), category("comments"), `comment on exported %s %s should be of the form "%s..."`, kind, name, prefix) - } -} - -func (f *file) checkStutter(id *ast.Ident, thing string) { - pkg, name := f.f.Name.Name, id.Name - if !ast.IsExported(name) { - // unexported name - return - } - // A name stutters if the package name is a strict prefix - // and the next character of the name starts a new word. - if len(name) <= len(pkg) { - // name is too short to stutter. - // This permits the name to be the same as the package name. - return - } - if !strings.EqualFold(pkg, name[:len(pkg)]) { - return - } - // We can assume the name is well-formed UTF-8. - // If the next rune after the package name is uppercase or an underscore - // the it's starting a new word and thus this name stutters. - rem := name[len(pkg):] - if next, _ := utf8.DecodeRuneInString(rem); next == '_' || unicode.IsUpper(next) { - f.errorf(id, 0.8, link(styleGuideBase+"#package-names"), category("naming"), "%s name will be used as %s.%s by other packages, and that stutters; consider calling this %s", thing, pkg, name, rem) - } -} - -// zeroLiteral is a set of ast.BasicLit values that are zero values. -// It is not exhaustive. -var zeroLiteral = map[string]bool{ - "false": true, // bool - // runes - `'\x00'`: true, - `'\000'`: true, - // strings - `""`: true, - "``": true, - // numerics - "0": true, - "0.": true, - "0.0": true, - "0i": true, -} - -// lintElses examines else blocks. It complains about any else block whose if block ends in a return. -func (f *file) lintElses() { - // We don't want to flag if { } else if { } else { } constructions. - // They will appear as an IfStmt whose Else field is also an IfStmt. - // Record such a node so we ignore it when we visit it. - ignore := make(map[*ast.IfStmt]bool) - - f.walk(func(node ast.Node) bool { - ifStmt, ok := node.(*ast.IfStmt) - if !ok || ifStmt.Else == nil { - return true - } - if elseif, ok := ifStmt.Else.(*ast.IfStmt); ok { - ignore[elseif] = true - return true - } - if ignore[ifStmt] { - return true - } - if _, ok := ifStmt.Else.(*ast.BlockStmt); !ok { - // only care about elses without conditions - return true - } - if len(ifStmt.Body.List) == 0 { - return true - } - shortDecl := false // does the if statement have a ":=" initialization statement? 
- if ifStmt.Init != nil { - if as, ok := ifStmt.Init.(*ast.AssignStmt); ok && as.Tok == token.DEFINE { - shortDecl = true - } - } - lastStmt := ifStmt.Body.List[len(ifStmt.Body.List)-1] - if _, ok := lastStmt.(*ast.ReturnStmt); ok { - extra := "" - if shortDecl { - extra = " (move short variable declaration to its own line if necessary)" - } - f.errorf(ifStmt.Else, 1, link(styleGuideBase+"#indent-error-flow"), category("indent"), "if block ends with a return statement, so drop this else and outdent its block"+extra) - } - return true - }) -} - -// lintRanges examines range clauses. It complains about redundant constructions. -func (f *file) lintRanges() { - f.walk(func(node ast.Node) bool { - rs, ok := node.(*ast.RangeStmt) - if !ok { - return true - } - - if isIdent(rs.Key, "_") && (rs.Value == nil || isIdent(rs.Value, "_")) { - p := f.errorf(rs.Key, 1, category("range-loop"), "should omit values from range; this loop is equivalent to `for range ...`") - - newRS := *rs // shallow copy - newRS.Value = nil - newRS.Key = nil - p.ReplacementLine = f.firstLineOf(&newRS, rs) - - return true - } - - if isIdent(rs.Value, "_") { - p := f.errorf(rs.Value, 1, category("range-loop"), "should omit 2nd value from range; this loop is equivalent to `for %s %s range ...`", f.render(rs.Key), rs.Tok) - - newRS := *rs // shallow copy - newRS.Value = nil - p.ReplacementLine = f.firstLineOf(&newRS, rs) - } - - return true - }) -} - -// lintErrorf examines errors.New and testing.Error calls. It complains if its only argument is an fmt.Sprintf invocation. -func (f *file) lintErrorf() { - f.walk(func(node ast.Node) bool { - ce, ok := node.(*ast.CallExpr) - if !ok || len(ce.Args) != 1 { - return true - } - isErrorsNew := isPkgDot(ce.Fun, "errors", "New") - var isTestingError bool - se, ok := ce.Fun.(*ast.SelectorExpr) - if ok && se.Sel.Name == "Error" { - if typ := f.pkg.typeOf(se.X); typ != nil { - isTestingError = typ.String() == "*testing.T" - } - } - if !isErrorsNew && !isTestingError { - return true - } - if !f.imports("errors") { - return true - } - arg := ce.Args[0] - ce, ok = arg.(*ast.CallExpr) - if !ok || !isPkgDot(ce.Fun, "fmt", "Sprintf") { - return true - } - errorfPrefix := "fmt" - if isTestingError { - errorfPrefix = f.render(se.X) - } - p := f.errorf(node, 1, category("errors"), "should replace %s(fmt.Sprintf(...)) with %s.Errorf(...)", f.render(se), errorfPrefix) - - m := f.srcLineWithMatch(ce, `^(.*)`+f.render(se)+`\(fmt\.Sprintf\((.*)\)\)(.*)$`) - if m != nil { - p.ReplacementLine = m[1] + errorfPrefix + ".Errorf(" + m[2] + ")" + m[3] - } - - return true - }) -} - -// lintErrors examines global error vars. It complains if they aren't named in the standard way. 
-func (f *file) lintErrors() { - for _, decl := range f.f.Decls { - gd, ok := decl.(*ast.GenDecl) - if !ok || gd.Tok != token.VAR { - continue - } - for _, spec := range gd.Specs { - spec := spec.(*ast.ValueSpec) - if len(spec.Names) != 1 || len(spec.Values) != 1 { - continue - } - ce, ok := spec.Values[0].(*ast.CallExpr) - if !ok { - continue - } - if !isPkgDot(ce.Fun, "errors", "New") && !isPkgDot(ce.Fun, "fmt", "Errorf") { - continue - } - - id := spec.Names[0] - prefix := "err" - if id.IsExported() { - prefix = "Err" - } - if !strings.HasPrefix(id.Name, prefix) { - f.errorf(id, 0.9, category("naming"), "error var %s should have name of the form %sFoo", id.Name, prefix) - } - } - } -} - -func lintErrorString(s string) (isClean bool, conf float64) { - const basicConfidence = 0.8 - const capConfidence = basicConfidence - 0.2 - first, firstN := utf8.DecodeRuneInString(s) - last, _ := utf8.DecodeLastRuneInString(s) - if last == '.' || last == ':' || last == '!' || last == '\n' { - return false, basicConfidence - } - if unicode.IsUpper(first) { - // People use proper nouns and exported Go identifiers in error strings, - // so decrease the confidence of warnings for capitalization. - if len(s) <= firstN { - return false, capConfidence - } - // Flag strings starting with something that doesn't look like an initialism. - if second, _ := utf8.DecodeRuneInString(s[firstN:]); !unicode.IsUpper(second) { - return false, capConfidence - } - } - return true, 0 -} - -// lintErrorStrings examines error strings. -// It complains if they are capitalized or end in punctuation or a newline. -func (f *file) lintErrorStrings() { - f.walk(func(node ast.Node) bool { - ce, ok := node.(*ast.CallExpr) - if !ok { - return true - } - if !isPkgDot(ce.Fun, "errors", "New") && !isPkgDot(ce.Fun, "fmt", "Errorf") { - return true - } - if len(ce.Args) < 1 { - return true - } - str, ok := ce.Args[0].(*ast.BasicLit) - if !ok || str.Kind != token.STRING { - return true - } - s, _ := strconv.Unquote(str.Value) // can assume well-formed Go - if s == "" { - return true - } - clean, conf := lintErrorString(s) - if clean { - return true - } - - f.errorf(str, conf, link(styleGuideBase+"#error-strings"), category("errors"), - "error strings should not be capitalized or end with punctuation or a newline") - return true - }) -} - -// lintReceiverNames examines receiver names. It complains about inconsistent -// names used for the same type and names such as "this". 
-func (f *file) lintReceiverNames() { - typeReceiver := map[string]string{} - f.walk(func(n ast.Node) bool { - fn, ok := n.(*ast.FuncDecl) - if !ok || fn.Recv == nil || len(fn.Recv.List) == 0 { - return true - } - names := fn.Recv.List[0].Names - if len(names) < 1 { - return true - } - name := names[0].Name - const ref = styleGuideBase + "#receiver-names" - if name == "_" { - f.errorf(n, 1, link(ref), category("naming"), `receiver name should not be an underscore, omit the name if it is unused`) - return true - } - if name == "this" || name == "self" { - f.errorf(n, 1, link(ref), category("naming"), `receiver name should be a reflection of its identity; don't use generic names such as "this" or "self"`) - return true - } - recv := receiverType(fn) - if prev, ok := typeReceiver[recv]; ok && prev != name { - f.errorf(n, 1, link(ref), category("naming"), "receiver name %s should be consistent with previous receiver name %s for %s", name, prev, recv) - return true - } - typeReceiver[recv] = name - return true - }) -} - -// lintIncDec examines statements that increment or decrement a variable. -// It complains if they don't use x++ or x--. -func (f *file) lintIncDec() { - f.walk(func(n ast.Node) bool { - as, ok := n.(*ast.AssignStmt) - if !ok { - return true - } - if len(as.Lhs) != 1 { - return true - } - if !isOne(as.Rhs[0]) { - return true - } - var suffix string - switch as.Tok { - case token.ADD_ASSIGN: - suffix = "++" - case token.SUB_ASSIGN: - suffix = "--" - default: - return true - } - f.errorf(as, 0.8, category("unary-op"), "should replace %s with %s%s", f.render(as), f.render(as.Lhs[0]), suffix) - return true - }) -} - -// lintErrorReturn examines function declarations that return an error. -// It complains if the error isn't the last parameter. -func (f *file) lintErrorReturn() { - f.walk(func(n ast.Node) bool { - fn, ok := n.(*ast.FuncDecl) - if !ok || fn.Type.Results == nil { - return true - } - ret := fn.Type.Results.List - if len(ret) <= 1 { - return true - } - if isIdent(ret[len(ret)-1].Type, "error") { - return true - } - // An error return parameter should be the last parameter. - // Flag any error parameters found before the last. - for _, r := range ret[:len(ret)-1] { - if isIdent(r.Type, "error") { - f.errorf(fn, 0.9, category("arg-order"), "error should be the last type when returning multiple items") - break // only flag one - } - } - return true - }) -} - -// lintUnexportedReturn examines exported function declarations. -// It complains if any return an unexported type. -func (f *file) lintUnexportedReturn() { - f.walk(func(n ast.Node) bool { - fn, ok := n.(*ast.FuncDecl) - if !ok { - return true - } - if fn.Type.Results == nil { - return false - } - if !fn.Name.IsExported() { - return false - } - thing := "func" - if fn.Recv != nil && len(fn.Recv.List) > 0 { - thing = "method" - if !ast.IsExported(receiverType(fn)) { - // Don't report exported methods of unexported types, - // such as private implementations of sort.Interface. - return false - } - } - for _, ret := range fn.Type.Results.List { - typ := f.pkg.typeOf(ret.Type) - if exportedType(typ) { - continue - } - f.errorf(ret.Type, 0.8, category("unexported-type-in-api"), - "exported %s %s returns unexported type %s, which can be annoying to use", - thing, fn.Name.Name, typ) - break // only flag one - } - return false - }) -} - -// exportedType reports whether typ is an exported type. -// It is imprecise, and will err on the side of returning true, -// such as for composite types. 
-func exportedType(typ types.Type) bool { - switch T := typ.(type) { - case *types.Named: - // Builtin types have no package. - return T.Obj().Pkg() == nil || T.Obj().Exported() - case *types.Map: - return exportedType(T.Key()) && exportedType(T.Elem()) - case interface { - Elem() types.Type - }: // array, slice, pointer, chan - return exportedType(T.Elem()) - } - // Be conservative about other types, such as struct, interface, etc. - return true -} - -// timeSuffixes is a list of name suffixes that imply a time unit. -// This is not an exhaustive list. -var timeSuffixes = []string{ - "Sec", "Secs", "Seconds", - "Msec", "Msecs", - "Milli", "Millis", "Milliseconds", - "Usec", "Usecs", "Microseconds", - "MS", "Ms", -} - -func (f *file) lintTimeNames() { - f.walk(func(node ast.Node) bool { - v, ok := node.(*ast.ValueSpec) - if !ok { - return true - } - for _, name := range v.Names { - origTyp := f.pkg.typeOf(name) - // Look for time.Duration or *time.Duration; - // the latter is common when using flag.Duration. - typ := origTyp - if pt, ok := typ.(*types.Pointer); ok { - typ = pt.Elem() - } - if !f.pkg.isNamedType(typ, "time", "Duration") { - continue - } - suffix := "" - for _, suf := range timeSuffixes { - if strings.HasSuffix(name.Name, suf) { - suffix = suf - break - } - } - if suffix == "" { - continue - } - f.errorf(v, 0.9, category("time"), "var %s is of type %v; don't use unit-specific suffix %q", name.Name, origTyp, suffix) - } - return true - }) -} - -// lintContextKeyTypes checks for call expressions to context.WithValue with -// basic types used for the key argument. -// See: https://golang.org/issue/17293 -func (f *file) lintContextKeyTypes() { - f.walk(func(node ast.Node) bool { - switch node := node.(type) { - case *ast.CallExpr: - f.checkContextKeyType(node) - } - - return true - }) -} - -// checkContextKeyType reports an error if the call expression calls -// context.WithValue with a key argument of basic type. -func (f *file) checkContextKeyType(x *ast.CallExpr) { - sel, ok := x.Fun.(*ast.SelectorExpr) - if !ok { - return - } - pkg, ok := sel.X.(*ast.Ident) - if !ok || pkg.Name != "context" { - return - } - if sel.Sel.Name != "WithValue" { - return - } - - // key is second argument to context.WithValue - if len(x.Args) != 3 { - return - } - key := f.pkg.typesInfo.Types[x.Args[1]] - - if ktyp, ok := key.Type.(*types.Basic); ok && ktyp.Kind() != types.Invalid { - f.errorf(x, 1.0, category("context"), fmt.Sprintf("should not use basic type %s as key in context.WithValue", key.Type)) - } -} - -// lintContextArgs examines function declarations that contain an -// argument with a type of context.Context -// It complains if that argument isn't the first parameter. -func (f *file) lintContextArgs() { - f.walk(func(n ast.Node) bool { - fn, ok := n.(*ast.FuncDecl) - if !ok || len(fn.Type.Params.List) <= 1 { - return true - } - // A context.Context should be the first parameter of a function. - // Flag any that show up after the first. - for _, arg := range fn.Type.Params.List[1:] { - if isPkgDot(arg.Type, "context", "Context") { - f.errorf(fn, 0.9, link("https://golang.org/pkg/context/"), category("arg-order"), "context.Context should be the first parameter of a function") - break // only flag one - } - } - return true - }) -} - -// containsComments returns whether the interval [start, end) contains any -// comments without "// MATCH " prefix. 
-func (f *file) containsComments(start, end token.Pos) bool { - for _, cgroup := range f.f.Comments { - comments := cgroup.List - if comments[0].Slash >= end { - // All comments starting with this group are after end pos. - return false - } - if comments[len(comments)-1].Slash < start { - // Comments group ends before start pos. - continue - } - for _, c := range comments { - if start <= c.Slash && c.Slash < end && !strings.HasPrefix(c.Text, "// MATCH ") { - return true - } - } - } - return false -} - -// receiverType returns the named type of the method receiver, sans "*", -// or "invalid-type" if fn.Recv is ill formed. -func receiverType(fn *ast.FuncDecl) string { - switch e := fn.Recv.List[0].Type.(type) { - case *ast.Ident: - return e.Name - case *ast.StarExpr: - if id, ok := e.X.(*ast.Ident); ok { - return id.Name - } - } - // The parser accepts much more than just the legal forms. - return "invalid-type" -} - -func (f *file) walk(fn func(ast.Node) bool) { - ast.Walk(walker(fn), f.f) -} - -func (f *file) render(x interface{}) string { - var buf bytes.Buffer - if err := printer.Fprint(&buf, f.fset, x); err != nil { - panic(err) - } - return buf.String() -} - -func (f *file) debugRender(x interface{}) string { - var buf bytes.Buffer - if err := ast.Fprint(&buf, f.fset, x, nil); err != nil { - panic(err) - } - return buf.String() -} - -// walker adapts a function to satisfy the ast.Visitor interface. -// The function return whether the walk should proceed into the node's children. -type walker func(ast.Node) bool - -func (w walker) Visit(node ast.Node) ast.Visitor { - if w(node) { - return w - } - return nil -} - -func isIdent(expr ast.Expr, ident string) bool { - id, ok := expr.(*ast.Ident) - return ok && id.Name == ident -} - -// isBlank returns whether id is the blank identifier "_". -// If id == nil, the answer is false. -func isBlank(id *ast.Ident) bool { return id != nil && id.Name == "_" } - -func isPkgDot(expr ast.Expr, pkg, name string) bool { - sel, ok := expr.(*ast.SelectorExpr) - return ok && isIdent(sel.X, pkg) && isIdent(sel.Sel, name) -} - -func isOne(expr ast.Expr) bool { - lit, ok := expr.(*ast.BasicLit) - return ok && lit.Kind == token.INT && lit.Value == "1" -} - -func isCgoExported(f *ast.FuncDecl) bool { - if f.Recv != nil || f.Doc == nil { - return false - } - - cgoExport := regexp.MustCompile(fmt.Sprintf("(?m)^//export %s$", regexp.QuoteMeta(f.Name.Name))) - for _, c := range f.Doc.List { - if cgoExport.MatchString(c.Text) { - return true - } - } - return false -} - -var basicTypeKinds = map[types.BasicKind]string{ - types.UntypedBool: "bool", - types.UntypedInt: "int", - types.UntypedRune: "rune", - types.UntypedFloat: "float64", - types.UntypedComplex: "complex128", - types.UntypedString: "string", -} - -// isUntypedConst reports whether expr is an untyped constant, -// and indicates what its default type is. -// scope may be nil. -func (f *file) isUntypedConst(expr ast.Expr) (defType string, ok bool) { - // Re-evaluate expr outside of its context to see if it's untyped. - // (An expr evaluated within, for example, an assignment context will get the type of the LHS.) - exprStr := f.render(expr) - tv, err := types.Eval(f.fset, f.pkg.typesPkg, expr.Pos(), exprStr) - if err != nil { - return "", false - } - if b, ok := tv.Type.(*types.Basic); ok { - if dt, ok := basicTypeKinds[b.Kind()]; ok { - return dt, true - } - } - - return "", false -} - -// firstLineOf renders the given node and returns its first line. -// It will also match the indentation of another node. 
-func (f *file) firstLineOf(node, match ast.Node) string { - line := f.render(node) - if i := strings.Index(line, "\n"); i >= 0 { - line = line[:i] - } - return f.indentOf(match) + line -} - -func (f *file) indentOf(node ast.Node) string { - line := srcLine(f.src, f.fset.Position(node.Pos())) - for i, r := range line { - switch r { - case ' ', '\t': - default: - return line[:i] - } - } - return line // unusual or empty line -} - -func (f *file) srcLineWithMatch(node ast.Node, pattern string) (m []string) { - line := srcLine(f.src, f.fset.Position(node.Pos())) - line = strings.TrimSuffix(line, "\n") - rx := regexp.MustCompile(pattern) - return rx.FindStringSubmatch(line) -} - -// imports returns true if the current file imports the specified package path. -func (f *file) imports(importPath string) bool { - all := astutil.Imports(f.fset, f.f) - for _, p := range all { - for _, i := range p { - uq, err := strconv.Unquote(i.Path.Value) - if err == nil && importPath == uq { - return true - } - } - } - return false -} - -// srcLine returns the complete line at p, including the terminating newline. -func srcLine(src []byte, p token.Position) string { - // Run to end of line in both directions if not at line start/end. - lo, hi := p.Offset, p.Offset+1 - for lo > 0 && src[lo-1] != '\n' { - lo-- - } - for hi < len(src) && src[hi-1] != '\n' { - hi++ - } - return string(src[lo:hi]) -} diff --git a/vendor/golang.org/x/net/html/const.go b/vendor/golang.org/x/net/html/const.go index 73804d347..ff7acf2d5 100644 --- a/vendor/golang.org/x/net/html/const.go +++ b/vendor/golang.org/x/net/html/const.go @@ -52,7 +52,7 @@ var isSpecialElementMap = map[string]bool{ "iframe": true, "img": true, "input": true, - "keygen": true, + "keygen": true, // "keygen" has been removed from the spec, but are kept here for backwards compatibility. 
"li": true, "link": true, "listing": true, diff --git a/vendor/golang.org/x/net/html/foreign.go b/vendor/golang.org/x/net/html/foreign.go index 74774c458..9da9e9dc4 100644 --- a/vendor/golang.org/x/net/html/foreign.go +++ b/vendor/golang.org/x/net/html/foreign.go @@ -161,65 +161,62 @@ var mathMLAttributeAdjustments = map[string]string{ } var svgAttributeAdjustments = map[string]string{ - "attributename": "attributeName", - "attributetype": "attributeType", - "basefrequency": "baseFrequency", - "baseprofile": "baseProfile", - "calcmode": "calcMode", - "clippathunits": "clipPathUnits", - "contentscripttype": "contentScriptType", - "contentstyletype": "contentStyleType", - "diffuseconstant": "diffuseConstant", - "edgemode": "edgeMode", - "externalresourcesrequired": "externalResourcesRequired", - "filterunits": "filterUnits", - "glyphref": "glyphRef", - "gradienttransform": "gradientTransform", - "gradientunits": "gradientUnits", - "kernelmatrix": "kernelMatrix", - "kernelunitlength": "kernelUnitLength", - "keypoints": "keyPoints", - "keysplines": "keySplines", - "keytimes": "keyTimes", - "lengthadjust": "lengthAdjust", - "limitingconeangle": "limitingConeAngle", - "markerheight": "markerHeight", - "markerunits": "markerUnits", - "markerwidth": "markerWidth", - "maskcontentunits": "maskContentUnits", - "maskunits": "maskUnits", - "numoctaves": "numOctaves", - "pathlength": "pathLength", - "patterncontentunits": "patternContentUnits", - "patterntransform": "patternTransform", - "patternunits": "patternUnits", - "pointsatx": "pointsAtX", - "pointsaty": "pointsAtY", - "pointsatz": "pointsAtZ", - "preservealpha": "preserveAlpha", - "preserveaspectratio": "preserveAspectRatio", - "primitiveunits": "primitiveUnits", - "refx": "refX", - "refy": "refY", - "repeatcount": "repeatCount", - "repeatdur": "repeatDur", - "requiredextensions": "requiredExtensions", - "requiredfeatures": "requiredFeatures", - "specularconstant": "specularConstant", - "specularexponent": "specularExponent", - "spreadmethod": "spreadMethod", - "startoffset": "startOffset", - "stddeviation": "stdDeviation", - "stitchtiles": "stitchTiles", - "surfacescale": "surfaceScale", - "systemlanguage": "systemLanguage", - "tablevalues": "tableValues", - "targetx": "targetX", - "targety": "targetY", - "textlength": "textLength", - "viewbox": "viewBox", - "viewtarget": "viewTarget", - "xchannelselector": "xChannelSelector", - "ychannelselector": "yChannelSelector", - "zoomandpan": "zoomAndPan", + "attributename": "attributeName", + "attributetype": "attributeType", + "basefrequency": "baseFrequency", + "baseprofile": "baseProfile", + "calcmode": "calcMode", + "clippathunits": "clipPathUnits", + "diffuseconstant": "diffuseConstant", + "edgemode": "edgeMode", + "filterunits": "filterUnits", + "glyphref": "glyphRef", + "gradienttransform": "gradientTransform", + "gradientunits": "gradientUnits", + "kernelmatrix": "kernelMatrix", + "kernelunitlength": "kernelUnitLength", + "keypoints": "keyPoints", + "keysplines": "keySplines", + "keytimes": "keyTimes", + "lengthadjust": "lengthAdjust", + "limitingconeangle": "limitingConeAngle", + "markerheight": "markerHeight", + "markerunits": "markerUnits", + "markerwidth": "markerWidth", + "maskcontentunits": "maskContentUnits", + "maskunits": "maskUnits", + "numoctaves": "numOctaves", + "pathlength": "pathLength", + "patterncontentunits": "patternContentUnits", + "patterntransform": "patternTransform", + "patternunits": "patternUnits", + "pointsatx": "pointsAtX", + "pointsaty": "pointsAtY", + "pointsatz": 
"pointsAtZ", + "preservealpha": "preserveAlpha", + "preserveaspectratio": "preserveAspectRatio", + "primitiveunits": "primitiveUnits", + "refx": "refX", + "refy": "refY", + "repeatcount": "repeatCount", + "repeatdur": "repeatDur", + "requiredextensions": "requiredExtensions", + "requiredfeatures": "requiredFeatures", + "specularconstant": "specularConstant", + "specularexponent": "specularExponent", + "spreadmethod": "spreadMethod", + "startoffset": "startOffset", + "stddeviation": "stdDeviation", + "stitchtiles": "stitchTiles", + "surfacescale": "surfaceScale", + "systemlanguage": "systemLanguage", + "tablevalues": "tableValues", + "targetx": "targetX", + "targety": "targetY", + "textlength": "textLength", + "viewbox": "viewBox", + "viewtarget": "viewTarget", + "xchannelselector": "xChannelSelector", + "ychannelselector": "yChannelSelector", + "zoomandpan": "zoomAndPan", } diff --git a/vendor/golang.org/x/net/html/parse.go b/vendor/golang.org/x/net/html/parse.go index 2cd12fc81..f91466f7c 100644 --- a/vendor/golang.org/x/net/html/parse.go +++ b/vendor/golang.org/x/net/html/parse.go @@ -728,7 +728,13 @@ func inHeadNoscriptIM(p *parser) bool { return inBodyIM(p) case a.Basefont, a.Bgsound, a.Link, a.Meta, a.Noframes, a.Style: return inHeadIM(p) - case a.Head, a.Noscript: + case a.Head: + // Ignore the token. + return true + case a.Noscript: + // Don't let the tokenizer go into raw text mode even when a